qcacld-3.0: Add support for WLAN-IPA WDI 2 SMMU S1 translation

Update WLAN-IPA WDI-2 datapath buffer sharing for SMMU Stage 1
translation support. When SMMU Stage 1 translation is enabled,
the DMA APIs return an IO virtual address (IOVA) instead of a
physical address. These IOVAs need to be mapped to physical
addresses by the IPA module before it accesses them.

Change-Id: I969ad020d5b423c785539f346286b212ea5830a1
CRs-Fixed: 2072960
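
The recurring pattern this patch applies is to keep a qdf_mem_info_t per shared buffer and derive the device-visible address through qdf_mem_get_dma_addr() instead of reading .paddr directly. A minimal sketch of that intent, assuming the pa/iova/size field names visible in the accesses elsewhere in this diff (not the authoritative qdf_mem.h definition):

    /* Sketch only: qdf_mem_info_t shape inferred from this patch's
     * accesses (mem_info.pa, mem_info.iova, mem_info.size).
     */
    typedef unsigned long qdf_dma_addr_t;

    typedef struct {
        qdf_dma_addr_t pa;   /* physical address */
        qdf_dma_addr_t iova; /* IO virtual address from the DMA layer */
        uint32_t size;       /* buffer size in bytes */
    } qdf_mem_info_t;

    /* Presumed behaviour of qdf_mem_get_dma_addr(): hand the device
     * the IOVA when SMMU Stage 1 is on, the physical address otherwise.
     */
    static qdf_dma_addr_t sketch_dma_addr(bool smmu_s1, qdf_mem_info_t *mi)
    {
        return smmu_s1 ? mi->iova : mi->pa;
    }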
This commit is contained in:
Sravan Kumar Kairam
2018-02-27 17:43:10 +05:30
committed by nshrivas
parent edf30dc042
commit b664b6c61b
11 changed files with 751 additions and 562 deletions

View file

@@ -2869,6 +2869,7 @@ int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
#else
void cds_smmu_mem_map_setup(qdf_device_t osdev)
{
osdev->smmu_s1_enabled = false;
}
int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
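
The stub above simply reports SMMU S1 as disabled; on SMMU targets the callers added in this patch batch their buffers through cds_smmu_map_unmap(). A hedged sketch of that calling convention, with signatures as inferred from the call sites below rather than from the QDF headers:

    /* Map `num` RX buffers into IPA's SMMU context in one batch. */
    static int sketch_map_bufs(qdf_device_t osdev, qdf_dma_addr_t *paddrs,
                               uint32_t num, uint32_t buf_size)
    {
        qdf_mem_info_t *tbl = qdf_mem_map_table_alloc(num);
        qdf_mem_info_t *mi = tbl;
        uint32_t i;

        if (!tbl)
            return -ENOMEM; /* kernel context assumed */

        for (i = 0; i < num; i++)
            qdf_update_mem_map_table(osdev, mi++, paddrs[i], buf_size);

        cds_smmu_map_unmap(true, num, tbl); /* true = map, false = unmap */
        qdf_mem_free(tbl);
        return 0;
    }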

View file

@@ -843,6 +843,7 @@ int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
__func__);
pdev->uc_map_reqd = 0;
/* TX resource attach */
error = htt_tx_ipa_uc_attach(
pdev,
@@ -893,67 +894,28 @@ void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
__func__);
}
/**
* htt_ipa_uc_get_resource() - Get uc resource from htt and lower layer
* @pdev: handle to the HTT instance
* @ce_sr_base_paddr: copy engine source ring base physical address
* @ce_sr_ring_size: copy engine source ring size
* @ce_reg_paddr: copy engine register physical address
* @tx_comp_ring_base_paddr: tx comp ring base physical address
* @tx_comp_ring_size: tx comp ring size
* @tx_num_alloc_buffer: number of allocated tx buffer
* @rx_rdy_ring_base_paddr: rx ready ring base physical address
* @rx_rdy_ring_size: rx ready ring size
* @rx_proc_done_idx_paddr: rx process done index physical address
* @rx_proc_done_idx_vaddr: rx process done index virtual address
* @rx2_rdy_ring_base_paddr: rx done ring base physical address
* @rx2_rdy_ring_size: rx done ring size
* @rx2_proc_done_idx_paddr: rx done index physical address
* @rx2_proc_done_idx_vaddr: rx done index virtual address
*
* Return: 0 success
*/
int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
qdf_dma_addr_t *ce_sr_base_paddr,
qdf_shared_mem_t **ce_sr,
qdf_shared_mem_t **tx_comp_ring,
qdf_shared_mem_t **rx_rdy_ring,
qdf_shared_mem_t **rx2_rdy_ring,
qdf_shared_mem_t **rx_proc_done_idx,
qdf_shared_mem_t **rx2_proc_done_idx,
uint32_t *ce_sr_ring_size,
qdf_dma_addr_t *ce_reg_paddr,
qdf_dma_addr_t *tx_comp_ring_base_paddr,
uint32_t *tx_comp_ring_size,
uint32_t *tx_num_alloc_buffer,
qdf_dma_addr_t *rx_rdy_ring_base_paddr,
uint32_t *rx_rdy_ring_size,
qdf_dma_addr_t *rx_proc_done_idx_paddr,
void **rx_proc_done_idx_vaddr,
qdf_dma_addr_t *rx2_rdy_ring_base_paddr,
uint32_t *rx2_rdy_ring_size,
qdf_dma_addr_t *rx2_proc_done_idx_paddr,
void **rx2_proc_done_idx_vaddr)
uint32_t *tx_num_alloc_buffer)
{
/* Release allocated resource to client */
*tx_comp_ring_base_paddr =
pdev->ipa_uc_tx_rsc.tx_comp_base.paddr;
*tx_comp_ring_size =
(uint32_t) ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev);
*tx_num_alloc_buffer = (uint32_t) pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
*rx_rdy_ring_base_paddr =
pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr;
*rx_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx_ind_ring_size;
*rx_proc_done_idx_paddr =
pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr;
*rx_proc_done_idx_vaddr =
(void *)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr;
*rx2_rdy_ring_base_paddr =
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr;
*rx2_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx2_ind_ring_size;
*rx2_proc_done_idx_paddr =
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr;
*rx2_proc_done_idx_vaddr =
(void *)pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr;
*tx_comp_ring = pdev->ipa_uc_tx_rsc.tx_comp_ring;
*rx_rdy_ring = pdev->ipa_uc_rx_rsc.rx_ind_ring;
*rx2_rdy_ring = pdev->ipa_uc_rx_rsc.rx2_ind_ring;
*rx_proc_done_idx = pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx;
*rx2_proc_done_idx = pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx;
*tx_num_alloc_buffer = (uint32_t)pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
/* Get copy engine, bus resource */
htc_ipa_get_ce_resource(pdev->htc_pdev,
ce_sr_base_paddr,
htc_ipa_get_ce_resource(pdev->htc_pdev, ce_sr,
ce_sr_ring_size, ce_reg_paddr);
return 0;
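
With the narrowed signature, a caller now receives qdf_shared_mem_t handles and derives whichever address form it needs. For example, mirroring the updated ol_txrx_ipa_uc_get_resource() later in this diff (htt_pdev and osdev assumed in scope):

    qdf_shared_mem_t *ce_sr, *tx_comp_ring, *rx_rdy_ring;
    qdf_shared_mem_t *rx2_rdy_ring, *rx_done_idx, *rx2_done_idx;
    uint32_t ce_sr_ring_size, tx_num_alloc_buffer;
    qdf_dma_addr_t ce_reg_paddr;

    htt_ipa_uc_get_resource(htt_pdev, &ce_sr, &tx_comp_ring,
                            &rx_rdy_ring, &rx2_rdy_ring,
                            &rx_done_idx, &rx2_done_idx,
                            &ce_sr_ring_size, &ce_reg_paddr,
                            &tx_num_alloc_buffer);

    /* IOVA under SMMU S1, physical address otherwise: */
    qdf_dma_addr_t base = qdf_mem_get_dma_addr(osdev,
                                               &tx_comp_ring->mem_info);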

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
* Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1036,7 +1036,8 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
*msg_word = 0;
/* TX COMP RING BASE LO */
HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_LO_SET(*msg_word,
(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_tx_rsc.tx_comp_ring->mem_info));
msg_word++;
*msg_word = 0;
/* TX COMP RING BASE HI, NONE */
@@ -1056,14 +1057,16 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_LO_SET(*msg_word,
(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_tx_rsc.tx_ce_idx->mem_info));
msg_word++;
*msg_word = 0;
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_LO_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_rx_rsc.rx_ind_ring->mem_info));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_HI_SET(*msg_word,
@@ -1077,7 +1080,8 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_LO_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx->mem_info));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_HI_SET(*msg_word,
@@ -1095,7 +1099,8 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_LO_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_rx_rsc.rx2_ind_ring->mem_info));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_HI_SET(*msg_word,
@@ -1109,7 +1114,8 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_LO_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx->mem_info));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_HI_SET(*msg_word,
@@ -1118,7 +1124,8 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_LO_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx->mem_info));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_HI_SET(*msg_word,
@@ -1178,7 +1185,8 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_tx_rsc.tx_comp_ring->mem_info));
msg_word++;
*msg_word = 0;
@@ -1194,12 +1202,14 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_tx_rsc.tx_ce_idx->mem_info));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_rx_rsc.rx_ind_ring->mem_info));
msg_word++;
*msg_word = 0;
@@ -1209,7 +1219,8 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
(unsigned int)qdf_mem_get_dma_addr(pdev->osdev,
&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx->mem_info));
msg_word++;
*msg_word = 0;
@@ -1256,6 +1267,16 @@ int htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev,
pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
if (qdf_mem_smmu_s1_enabled(pdev->osdev) && uc_active && !is_tx) {
if (htt_rx_ipa_uc_buf_pool_map(pdev)) {
qdf_print("%s: Unable to create mapping for IPA rx buffers\n",
__func__);
htt_htc_pkt_free(pdev, pkt);
return -A_NO_MEMORY;
}
pdev->uc_map_reqd = 1;
}
/* reserve room for HTC header */
msg = qdf_nbuf_alloc(pdev->osdev,
HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),

View file

@@ -588,6 +588,14 @@ htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size);
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
/**
* htt_rx_ipa_uc_buf_pool_map() - create mappings for IPA rx buffers
* @pdev: htt context
*
* Return: 0 success
*/
int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev);
#else
/**
* htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
@@ -629,6 +637,12 @@ static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
return 0;
}
static inline int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev)
{
return 0;
}
#endif /* IPA_OFFLOAD */
/* Maximum Outstanding Bus Download */

View file

@@ -485,12 +485,23 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
struct htt_host_rx_desc_base *rx_desc;
int filled = 0;
int debt_served = 0;
qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
int num_alloc = 0;
idx = *(pdev->rx_ring.alloc_idx.vaddr);
if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
mem_map_table = qdf_mem_map_table_alloc(num);
if (!mem_map_table) {
qdf_print("%s: Failed to allocate memory for mem map table\n",
__func__);
goto fail;
}
mem_info = mem_map_table;
}
moretofill:
while (num > 0) {
qdf_dma_addr_t paddr;
qdf_dma_addr_t paddr, paddr_marked;
qdf_nbuf_t rx_netbuf;
int headroom;
@@ -513,7 +524,7 @@ moretofill:
qdf_timer_start(
&pdev->rx_ring.refill_retry_timer,
HTT_RX_RING_REFILL_RETRY_TIME_MS);
goto fail;
goto free_mem_map_table;
}
/* Clear rx_desc attention word before posting to Rx ring */
@@ -550,13 +561,13 @@ moretofill:
#endif
if (status != QDF_STATUS_SUCCESS) {
qdf_nbuf_free(rx_netbuf);
goto fail;
goto free_mem_map_table;
}
paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
paddr = htt_rx_paddr_mark_high_bits(paddr);
paddr_marked = htt_rx_paddr_mark_high_bits(paddr);
if (pdev->cfg.is_full_reorder_offload) {
if (qdf_unlikely(htt_rx_hash_list_insert(
pdev, paddr, rx_netbuf))) {
pdev, paddr_marked, rx_netbuf))) {
QDF_TRACE(QDF_MODULE_ID_HTT,
QDF_TRACE_LEVEL_ERROR,
"%s: hash insert failed!", __func__);
@@ -568,13 +579,21 @@ moretofill:
QDF_DMA_FROM_DEVICE);
#endif
qdf_nbuf_free(rx_netbuf);
goto fail;
goto free_mem_map_table;
}
htt_rx_dbg_rxbuf_set(pdev, paddr, rx_netbuf);
htt_rx_dbg_rxbuf_set(pdev, paddr_marked, rx_netbuf);
} else {
pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
}
pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
qdf_update_mem_map_table(pdev->osdev, mem_info,
paddr, HTT_RX_BUF_SIZE);
mem_info++;
num_alloc++;
}
pdev->rx_ring.buf.paddrs_ring[idx] = paddr_marked;
pdev->rx_ring.fill_cnt++;
num--;
@@ -588,6 +607,12 @@ moretofill:
goto moretofill;
}
free_mem_map_table:
if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
cds_smmu_map_unmap(true, num_alloc, mem_map_table);
qdf_mem_free(mem_map_table);
}
fail:
/*
* Make sure alloc index write is reflected correctly before FW polls
@@ -2332,6 +2357,9 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
struct htt_host_rx_desc_base *rx_desc;
enum rx_pkt_fate status = RX_PKT_FATE_SUCCESS;
qdf_dma_addr_t paddr;
qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
uint32_t num_unmapped = 0;
int ret = 1;
HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
@@ -2347,17 +2375,26 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
/* Get the total number of MSDUs */
msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
HTT_RX_CHECK_MSDU_COUNT(msdu_count);
if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
mem_map_table = qdf_mem_map_table_alloc(msdu_count);
if (!mem_map_table) {
qdf_print("%s: Failed to allocate memory for mem map table\n",
__func__);
return 0;
}
mem_info = mem_map_table;
}
ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
msg_word =
(uint32_t *) (rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
if (offload_ind) {
ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
msg_word);
*head_msdu = *tail_msdu = NULL;
return 0;
ret = 0;
goto free_mem_map_table;
}
paddr = htt_rx_in_ord_paddr_get(msg_word);
@@ -2367,10 +2404,18 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
qdf_print("%s: netbuf pop failed!\n", __func__);
*tail_msdu = NULL;
pdev->rx_ring.pop_fail_cnt++;
return 0;
ret = 0;
goto free_mem_map_table;
}
while (msdu_count > 0) {
if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
qdf_update_mem_map_table(pdev->osdev, mem_info,
QDF_NBUF_CB_PADDR(msdu),
HTT_RX_BUF_SIZE);
mem_info++;
num_unmapped++;
}
/*
* Set the netbuf length to be the entire buffer length
@@ -2446,11 +2491,12 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
/* if this is the only msdu */
if (!prev) {
*head_msdu = *tail_msdu = NULL;
return 0;
ret = 0;
goto free_mem_map_table;
}
*tail_msdu = prev;
qdf_nbuf_set_next(prev, NULL);
return 1;
goto free_mem_map_table;
} else { /* if this is not the last msdu */
/* get the next msdu */
msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
@@ -2461,7 +2507,8 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
__func__);
*tail_msdu = NULL;
pdev->rx_ring.pop_fail_cnt++;
return 0;
ret = 0;
goto free_mem_map_table;
}
/* if this is not the first msdu, update the
@@ -2493,7 +2540,8 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
__func__);
*tail_msdu = NULL;
pdev->rx_ring.pop_fail_cnt++;
return 0;
ret = 0;
goto free_mem_map_table;
}
qdf_nbuf_set_next(msdu, next);
prev = msdu;
@@ -2504,7 +2552,14 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
}
}
return 1;
free_mem_map_table:
if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
if (num_unmapped)
cds_smmu_map_unmap(false, num_unmapped,
mem_map_table);
qdf_mem_free(mem_map_table);
}
return ret;
}
#endif
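
The map/unmap calls in the two RX paths above are symmetric: buffers posted to the ring are mapped for IPA, buffers popped and delivered up the stack are unmapped. A hypothetical helper (names ours, not QDF's) makes the lifecycle explicit:

    static void sketch_sync_ipa_smmu(struct htt_pdev_t *pdev,
                                     qdf_mem_info_t *tbl, uint32_t n,
                                     bool map)
    {
        /* Only relevant when SMMU S1 is on and IPA asked for mappings */
        if (!qdf_mem_smmu_s1_enabled(pdev->osdev) || !pdev->uc_map_reqd)
            return;
        if (n)
            cds_smmu_map_unmap(map, n, tbl); /* map on fill, unmap on pop */
    }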
@@ -3812,47 +3867,53 @@ static int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
*
* RX indication ring size, by bytes
*/
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
rx_ind_ring_elements * sizeof(target_paddr_t);
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
qdf_mem_alloc_consistent(
pdev->osdev, pdev->osdev->dev,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr);
if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
qdf_print("%s: RX IND RING alloc fail", __func__);
return -ENOBUFS;
pdev->ipa_uc_rx_rsc.rx2_ind_ring =
qdf_mem_shared_mem_alloc(pdev->osdev,
rx_ind_ring_elements *
sizeof(qdf_dma_addr_t));
if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring) {
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
"%s: Unable to allocate memory for IPA rx2 ind ring",
__func__);
return 1;
}
qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
/* Allocate RX process done index */
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
qdf_mem_alloc_consistent(
pdev->osdev, pdev->osdev->dev, 4,
&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr);
if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
qdf_print("%s: RX PROC DONE IND alloc fail", __func__);
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ind_ring_base),
memctx));
return -ENOBUFS;
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx =
qdf_mem_shared_mem_alloc(pdev->osdev, 4);
if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx) {
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
"%s: Unable to allocate memory for IPA rx proc done index",
__func__);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_rx_rsc.rx2_ind_ring);
return 1;
}
qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
return 0;
}
/**
* htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
* @pdev: htt context
*
* Return: None
*/
static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
{
qdf_mem_shared_mem_free(pdev->osdev, pdev->ipa_uc_rx_rsc.rx2_ind_ring);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx);
}
#else
static int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
unsigned int rx_ind_ring_elements)
{
return 0;
}
static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
{
}
#endif
/**
@@ -3874,113 +3935,84 @@ int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
* 2bytes: VDEV ID
* 2bytes: length
*/
pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
qdf_mem_alloc_consistent(
pdev->osdev,
pdev->osdev->dev,
rx_ind_ring_elements *
sizeof(struct ipa_uc_rx_ring_elem_t),
&pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
qdf_print("%s: RX IND RING alloc fail", __func__);
return -ENOBUFS;
pdev->ipa_uc_rx_rsc.rx_ind_ring =
qdf_mem_shared_mem_alloc(pdev->osdev,
rx_ind_ring_elements *
sizeof(struct ipa_uc_rx_ring_elem_t));
if (!pdev->ipa_uc_rx_rsc.rx_ind_ring) {
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
"%s: Unable to allocate memory for IPA rx ind ring",
__func__);
return 1;
}
/* RX indication ring size, by bytes */
pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
/* Allocate RX process done index */
pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
qdf_mem_alloc_consistent(
pdev->osdev, pdev->osdev->dev, 4,
&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
qdf_print("%s: RX PROC DONE IND alloc fail", __func__);
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx_ind_ring_base),
memctx));
return -ENOBUFS;
pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx =
qdf_mem_shared_mem_alloc(pdev->osdev, 4);
if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx) {
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
"%s: Unable to allocate memory for IPA rx proc done index",
__func__);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_rx_rsc.rx_ind_ring);
return 1;
}
qdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
if (ret) {
qdf_mem_shared_mem_free(pdev->osdev, pdev->ipa_uc_rx_rsc.rx_ind_ring);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx);
}
return ret;
}
#ifdef QCA_WIFI_3_0
/**
* htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
* @pdev: htt context
*
* Return: None
*/
static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
{
if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ind_ring_base),
memctx));
}
if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
4,
pdev->ipa_uc_rx_rsc.
rx2_ipa_prc_done_idx.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ipa_prc_done_idx),
memctx));
}
}
#else
static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
{
}
#endif
int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx_ind_ring_base),
memctx));
}
if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
4,
pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx.vaddr,
pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ipa_prc_done_idx),
memctx));
}
qdf_mem_shared_mem_free(pdev->osdev, pdev->ipa_uc_rx_rsc.rx_ind_ring);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx);
htt_rx_ipa_uc_free_wdi2_rsc(pdev);
return 0;
}
int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev)
{
struct htt_rx_hash_entry *hash_entry;
struct htt_list_node *list_iter = NULL;
qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
uint32_t num_alloc = 0;
uint32_t i;
mem_map_table = qdf_mem_map_table_alloc(HTT_RX_RING_SIZE_MAX);
if (!mem_map_table) {
qdf_print("%s: Failed to allocate memory for mem map table\n",
__func__);
return 1;
}
mem_info = mem_map_table;
for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
list_iter = pdev->rx_ring.hash_table[i]->listhead.next;
while (list_iter != &pdev->rx_ring.hash_table[i]->listhead) {
hash_entry = (struct htt_rx_hash_entry *)(
(char *)list_iter -
pdev->rx_ring.listnode_offset);
if (hash_entry->netbuf) {
qdf_update_mem_map_table(pdev->osdev,
mem_info,
QDF_NBUF_CB_PADDR(hash_entry->netbuf),
HTT_RX_BUF_SIZE);
mem_info++;
num_alloc++;
}
list_iter = list_iter->next;
}
}
cds_smmu_map_unmap(true, num_alloc, mem_map_table);
qdf_mem_free(mem_map_table);
return 0;
}
#endif /* IPA_OFFLOAD */
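
The cast in htt_rx_ipa_uc_buf_pool_map() above is an open-coded container_of: listnode_offset is the offset of the list node within struct htt_rx_hash_entry, so stepping back from the node pointer recovers the enclosing entry. An equivalent standard-C sketch (the member name is assumed for illustration):

    #include <stddef.h> /* offsetof */

    #define SKETCH_CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* hash_entry = SKETCH_CONTAINER_OF(list_iter,
     *                                  struct htt_rx_hash_entry,
     *                                  listnode);
     */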
/**

View file

@@ -1070,26 +1070,39 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
{
unsigned int tx_buffer_count;
unsigned int tx_buffer_count_pwr2;
void *buffer_vaddr;
qdf_dma_addr_t buffer_paddr;
uint32_t *header_ptr;
qdf_dma_addr_t *ring_vaddr;
uint16_t idx;
qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
qdf_shared_mem_t *shared_tx_buffer;
ring_vaddr = (qdf_dma_addr_t *)pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
mem_map_table = qdf_mem_map_table_alloc(uc_tx_buf_cnt);
if (!mem_map_table) {
qdf_print("%s: Failed to allocate memory for mem map table\n",
__func__);
return 0;
}
mem_info = mem_map_table;
}
ring_vaddr = (qdf_dma_addr_t *)pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
/* Allocate TX buffers as many as possible */
for (tx_buffer_count = 0;
tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
buffer_vaddr = qdf_mem_alloc_consistent(pdev->osdev,
pdev->osdev->dev, uc_tx_buf_sz, &buffer_paddr);
if (!buffer_vaddr) {
shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
uc_tx_buf_sz);
if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
qdf_print("IPA WDI TX buffer alloc fail %d allocated\n",
tx_buffer_count);
goto pwr2;
}
header_ptr = buffer_vaddr;
header_ptr = shared_tx_buffer->vaddr;
buffer_paddr = qdf_mem_get_dma_addr(pdev->osdev,
&shared_tx_buffer->mem_info);
/* HTT control header */
*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
@@ -1122,13 +1135,18 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
*ring_vaddr = buffer_paddr;
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
buffer_vaddr;
pdev->ipa_uc_tx_rsc.paddr_strg[tx_buffer_count] =
buffer_paddr;
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[tx_buffer_count] =
shared_tx_buffer;
/* Memory barrier to ensure actual value updated */
ring_vaddr++;
if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
qdf_update_mem_map_table(pdev->osdev, mem_info,
shared_tx_buffer->mem_info.iova,
uc_tx_buf_sz);
mem_info++;
}
}
pwr2:
@@ -1146,22 +1164,20 @@ pwr2:
/* Free over allocated buffers below power of 2 */
for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
ol_cfg_ipa_uc_tx_buf_size(
pdev->ctrl_pdev),
pdev->ipa_uc_tx_rsc.
tx_buf_pool_vaddr_strg[idx],
pdev->ipa_uc_tx_rsc.paddr_strg[idx], 0);
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
idx]);
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] =
NULL;
}
}
}
if (tx_buffer_count_pwr2 < 0) {
qdf_print("%s: Failed to round down Tx buffer count %d",
__func__, tx_buffer_count_pwr2);
tx_buffer_count_pwr2 = 0;
if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
cds_smmu_map_unmap(true, tx_buffer_count_pwr2,
mem_map_table);
qdf_mem_free(mem_map_table);
}
return tx_buffer_count_pwr2;
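
The pwr2: path relies on rounding the allocated count down to a power of two (the helper itself is elided from this hunk); buffers above that boundary are freed, and the SMMU batch map covers only the surviving count. The round-down can be sketched as:

    /* Largest power of two <= n; assumes n >= 1. */
    static unsigned int sketch_rounddown_pow2(unsigned int n)
    {
        unsigned int p = 1;

        while ((p << 1) != 0 && (p << 1) <= n)
            p <<= 1;
        return p;
    }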
@@ -1180,12 +1196,15 @@ static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
uint16_t idx;
for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
pdev->ipa_uc_tx_rsc.paddr_strg[idx], 0);
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
if (qdf_mem_smmu_s1_enabled(pdev->osdev))
cds_smmu_map_unmap(false, 1,
&pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
idx]->mem_info);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.
tx_buf_pool_strg[idx]);
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
}
}
}
@@ -1197,28 +1216,40 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
{
unsigned int tx_buffer_count;
unsigned int tx_buffer_count_pwr2;
qdf_nbuf_t buffer_vaddr;
qdf_dma_addr_t buffer_paddr;
uint32_t *header_ptr;
uint32_t *ring_vaddr;
uint16_t idx;
QDF_STATUS status;
qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
qdf_shared_mem_t *shared_tx_buffer;
ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
mem_map_table = qdf_mem_map_table_alloc(uc_tx_buf_cnt);
if (!mem_map_table) {
qdf_print("%s: Failed to allocate memory for mem map table\n",
__func__);
return 0;
}
mem_info = mem_map_table;
}
ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
/* Allocate TX buffers as many as possible */
for (tx_buffer_count = 0;
tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
buffer_vaddr = qdf_nbuf_alloc(pdev->osdev,
uc_tx_buf_sz, 0, 4, false);
if (!buffer_vaddr) {
shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
uc_tx_buf_sz);
if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
qdf_print("%s: TX BUF alloc fail, loop index: %d",
__func__, tx_buffer_count);
goto pwr2;
}
/* Init buffer */
qdf_mem_zero(qdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
header_ptr = (uint32_t *) qdf_nbuf_data(buffer_vaddr);
qdf_mem_zero(shared_tx_buffer->vaddr, uc_tx_buf_sz);
header_ptr = (uint32_t *)shared_tx_buffer->vaddr;
buffer_paddr = qdf_mem_get_dma_addr(pdev->osdev,
&shared_tx_buffer->mem_info);
/* HTT control header */
*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
@@ -1227,19 +1258,9 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
/* PKT ID */
*header_ptr |= ((uint16_t) uc_tx_partition_base +
tx_buffer_count) << 16;
status = qdf_nbuf_map(pdev->osdev, buffer_vaddr,
QDF_DMA_BIDIRECTIONAL);
if (status != QDF_STATUS_SUCCESS) {
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
"%s: nbuf map failed, loop index: %d",
__func__, tx_buffer_count);
qdf_nbuf_free(buffer_vaddr);
goto pwr2;
}
buffer_paddr = qdf_nbuf_get_frag_paddr(buffer_vaddr, 0);
header_ptr++;
/*FRAG Desc Pointer */
*header_ptr = (uint32_t) (buffer_paddr +
IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
header_ptr++;
@@ -1250,11 +1271,17 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
*ring_vaddr = buffer_paddr;
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
buffer_vaddr;
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[tx_buffer_count] =
shared_tx_buffer;
/* Memory barrier to ensure actual value updated */
ring_vaddr++;
if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
qdf_update_mem_map_table(pdev->osdev, mem_info,
shared_tx_buffer->mem_info.iova,
uc_tx_buf_sz);
mem_info++;
}
}
pwr2:
@@ -1272,21 +1299,20 @@ pwr2:
/* Free over allocated buffers below power of 2 */
for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
buffer_vaddr =
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx];
if (buffer_vaddr) {
qdf_nbuf_unmap(pdev->osdev, buffer_vaddr,
QDF_DMA_BIDIRECTIONAL);
qdf_nbuf_free(buffer_vaddr);
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.
tx_buf_pool_strg[idx]);
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] =
NULL;
}
}
}
if (tx_buffer_count_pwr2 < 0) {
qdf_print("%s: Failed to round down Tx buffer count %d",
__func__, tx_buffer_count_pwr2);
tx_buffer_count_pwr2 = 0;
if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
cds_smmu_map_unmap(true, tx_buffer_count_pwr2,
mem_map_table);
qdf_mem_free(mem_map_table);
}
return tx_buffer_count_pwr2;
@@ -1295,14 +1321,17 @@ pwr2:
static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
{
uint16_t idx;
void *buffer_vaddr;
for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
buffer_vaddr = pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx];
if (buffer_vaddr) {
qdf_nbuf_unmap(pdev->osdev, buffer_vaddr,
QDF_DMA_BIDIRECTIONAL);
qdf_nbuf_free(buffer_vaddr);
if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
if (qdf_mem_smmu_s1_enabled(pdev->osdev))
cds_smmu_map_unmap(false, 1,
&pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
idx]->mem_info);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.
tx_buf_pool_strg[idx]);
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
}
}
}
@@ -1327,48 +1356,39 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
unsigned int tx_comp_ring_size;
/* Allocate CE Write Index WORD */
pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
qdf_mem_alloc_consistent(
pdev->osdev, pdev->osdev->dev,
4, &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
qdf_print("%s: CE Write Index WORD alloc fail", __func__);
pdev->ipa_uc_tx_rsc.tx_ce_idx =
qdf_mem_shared_mem_alloc(pdev->osdev, 4);
if (!pdev->ipa_uc_tx_rsc.tx_ce_idx) {
qdf_print("%s: Unable to allocate memory for IPA tx ce idx\n",
__func__);
return -ENOBUFS;
}
/* Allocate TX COMP Ring */
tx_comp_ring_size = uc_tx_buf_cnt * sizeof(target_paddr_t);
pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
qdf_mem_alloc_consistent(
pdev->osdev, pdev->osdev->dev,
tx_comp_ring_size,
&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
pdev->ipa_uc_tx_rsc.tx_comp_ring =
qdf_mem_shared_mem_alloc(pdev->osdev,
tx_comp_ring_size);
if (!pdev->ipa_uc_tx_rsc.tx_comp_ring ||
!pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr) {
qdf_print("%s: TX COMP ring alloc fail", __func__);
return_code = -ENOBUFS;
goto free_tx_ce_idx;
}
qdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);
/* Allocate TX BUF vAddress Storage */
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
pdev->ipa_uc_tx_rsc.tx_buf_pool_strg =
qdf_mem_malloc(uc_tx_buf_cnt *
sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg));
if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_strg));
if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_strg) {
qdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
return_code = -ENOBUFS;
goto free_tx_comp_base;
}
pdev->ipa_uc_tx_rsc.paddr_strg =
qdf_mem_malloc(uc_tx_buf_cnt *
sizeof(pdev->ipa_uc_tx_rsc.paddr_strg));
if (!pdev->ipa_uc_tx_rsc.paddr_strg) {
qdf_print("%s: TX BUF POOL paddr storage alloc fail", __func__);
return_code = -ENOBUFS;
goto free_tx_buf_pool_vaddr_strg;
}
qdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_strg,
uc_tx_buf_cnt *
sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_strg));
pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = htt_tx_ipa_uc_wdi_tx_buf_alloc(
pdev, uc_tx_buf_sz, uc_tx_buf_cnt, uc_tx_partition_base);
@@ -1376,26 +1396,13 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
return 0;
free_tx_buf_pool_vaddr_strg:
qdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
free_tx_comp_base:
qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
tx_comp_ring_size,
pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
qdf_get_dma_mem_context((&pdev->
ipa_uc_tx_rsc.
tx_comp_base),
memctx));
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.tx_comp_ring);
free_tx_ce_idx:
qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
4,
pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
qdf_get_dma_mem_context((&pdev->
ipa_uc_tx_rsc.
tx_ce_idx),
memctx));
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.tx_ce_idx);
return return_code;
}
@@ -1410,35 +1417,16 @@ free_tx_ce_idx:
*/
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
4,
pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
qdf_get_dma_mem_context(
(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
memctx));
}
if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
qdf_mem_free_consistent(
pdev->osdev, pdev->osdev->dev,
ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) *
sizeof(target_paddr_t),
pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
qdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
tx_comp_base),
memctx));
}
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.tx_ce_idx);
qdf_mem_shared_mem_free(pdev->osdev,
pdev->ipa_uc_tx_rsc.tx_comp_ring);
/* Free each single buffer */
htt_tx_buf_pool_free(pdev);
/* Free storage */
qdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
qdf_mem_free(pdev->ipa_uc_tx_rsc.paddr_strg);
qdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_strg);
return 0;
}

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
* Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -121,53 +121,35 @@ struct htt_rx_hash_bucket {
#endif
};
/*
* IPA micro controller
* wlan host driver
* firmware shared memory structure
*/
struct uc_shared_mem_t {
uint32_t *vaddr;
qdf_dma_addr_t paddr;
qdf_dma_mem_context(memctx);
};
/*
* Micro controller datapath offload
* WLAN TX resources
*/
struct htt_ipa_uc_tx_resource_t {
struct uc_shared_mem_t tx_ce_idx;
struct uc_shared_mem_t tx_comp_base;
qdf_shared_mem_t *tx_ce_idx;
qdf_shared_mem_t *tx_comp_ring;
uint32_t tx_comp_idx_paddr;
void **tx_buf_pool_vaddr_strg;
qdf_dma_addr_t *paddr_strg;
qdf_shared_mem_t **tx_buf_pool_strg;
uint32_t alloc_tx_buf_cnt;
};
/**
* struct htt_ipa_uc_rx_resource_t
* @rx_rdy_idx_paddr: rx ready index physical address
* @rx_ind_ring_base: rx indication ring base memory info
* @rx_ind_ring: rx indication ring memory info
* @rx_ipa_prc_done_idx: rx process done index memory info
* @rx_ind_ring_size: rx process done ring size
* @rx2_rdy_idx_paddr: rx process done index physical address
* @rx2_ind_ring_base: rx process done indication ring base memory info
* @rx2_ipa_prc_done_idx: rx process done index memory info
* @rx2_ind_ring_size: rx process done ring size
* @rx2_ind_ring: rx2 indication ring memory info
* @rx2_ipa_prc_done_idx: rx2 process done index memory info
*/
struct htt_ipa_uc_rx_resource_t {
qdf_dma_addr_t rx_rdy_idx_paddr;
struct uc_shared_mem_t rx_ind_ring_base;
struct uc_shared_mem_t rx_ipa_prc_done_idx;
uint32_t rx_ind_ring_size;
qdf_shared_mem_t *rx_ind_ring;
qdf_shared_mem_t *rx_ipa_prc_done_idx;
/* 2nd RX ring */
qdf_dma_addr_t rx2_rdy_idx_paddr;
struct uc_shared_mem_t rx2_ind_ring_base;
struct uc_shared_mem_t rx2_ipa_prc_done_idx;
uint32_t rx2_ind_ring_size;
qdf_shared_mem_t *rx2_ind_ring;
qdf_shared_mem_t *rx2_ipa_prc_done_idx;
};
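
This header is the pivot of the whole change: every uc_shared_mem_t instance becomes a qdf_shared_mem_t pointer. Reconstructed from the accesses made elsewhere in this diff (->vaddr, ->mem_info.pa/.iova/.size, ->sgtable), the handle is assumed to look roughly like the following; qdf_mem.h holds the authoritative definition:

    typedef struct {
        void *vaddr;             /* kernel virtual address */
        qdf_mem_info_t mem_info; /* pa, iova and size bookkeeping */
        sgtable_t sgtable;       /* scatter/gather table passed to IPA
                                  * for SMMU pipe setup */
    } qdf_shared_mem_t;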
/**
@@ -430,6 +412,7 @@ struct htt_pdev_t {
struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
int uc_map_reqd;
struct htt_tx_credit_t htt_tx_credit;

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
* Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -224,22 +224,32 @@ void htt_display(htt_pdev_handle pdev, int indent);
#ifdef IPA_OFFLOAD
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev);
/**
* htt_ipa_uc_get_resource() - Get uc resource from htt and lower layer
* @pdev - handle to the HTT instance
* @ce_sr - CE source ring DMA mapping info
* @tx_comp_ring - tx completion ring DMA mapping info
* @rx_rdy_ring - rx Ready ring DMA mapping info
* @rx2_rdy_ring - rx2 Ready ring DMA mapping info
* @rx_proc_done_idx - rx process done index
* @rx2_proc_done_idx - rx2 process done index
* @ce_sr_ring_size: copy engine source ring size
* @ce_reg_paddr - CE Register address
* @tx_num_alloc_buffer - Number of TX allocated buffers
*
* Return: 0 success
*/
int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
qdf_dma_addr_t *ce_sr_base_paddr,
qdf_shared_mem_t **ce_sr,
qdf_shared_mem_t **tx_comp_ring,
qdf_shared_mem_t **rx_rdy_ring,
qdf_shared_mem_t **rx2_rdy_ring,
qdf_shared_mem_t **rx_proc_done_idx,
qdf_shared_mem_t **rx2_proc_done_idx,
uint32_t *ce_sr_ring_size,
qdf_dma_addr_t *ce_reg_paddr,
qdf_dma_addr_t *tx_comp_ring_base_paddr,
uint32_t *tx_comp_ring_size,
uint32_t *tx_num_alloc_buffer,
qdf_dma_addr_t *rx_rdy_ring_base_paddr,
uint32_t *rx_rdy_ring_size,
qdf_dma_addr_t *rx_proc_done_idx_paddr,
void **rx_proc_done_idx_vaddr,
qdf_dma_addr_t *rx2_rdy_ring_base_paddr,
uint32_t *rx2_rdy_ring_size,
qdf_dma_addr_t *rx2_proc_done_idx_paddr,
void **rx2_proc_done_idx_vaddr);
uint32_t *tx_num_alloc_buffer);
int
htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
@@ -271,46 +281,6 @@ static inline int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
return 0;
}
/**
* htt_ipa_uc_get_resource() - Get uc resource from htt and lower layer
* @pdev: handle to the HTT instance
* @ce_sr_base_paddr: copy engine source ring base physical address
* @ce_sr_ring_size: copy engine source ring size
* @ce_reg_paddr: copy engine register physical address
* @tx_comp_ring_base_paddr: tx comp ring base physical address
* @tx_comp_ring_size: tx comp ring size
* @tx_num_alloc_buffer: number of allocated tx buffer
* @rx_rdy_ring_base_paddr: rx ready ring base physical address
* @rx_rdy_ring_size: rx ready ring size
* @rx_proc_done_idx_paddr: rx process done index physical address
* @rx_proc_done_idx_vaddr: rx process done index virtual address
* @rx2_rdy_ring_base_paddr: rx done ring base physical address
* @rx2_rdy_ring_size: rx done ring size
* @rx2_proc_done_idx_paddr: rx done index physical address
* @rx2_proc_done_idx_vaddr: rx done index virtual address
*
* Return: 0 success
*/
static inline int
htt_ipa_uc_get_resource(htt_pdev_handle pdev,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
qdf_dma_addr_t *ce_reg_paddr,
qdf_dma_addr_t *tx_comp_ring_base_paddr,
uint32_t *tx_comp_ring_size,
uint32_t *tx_num_alloc_buffer,
qdf_dma_addr_t *rx_rdy_ring_base_paddr,
uint32_t *rx_rdy_ring_size,
qdf_dma_addr_t *rx_proc_done_idx_paddr,
void **rx_proc_done_idx_vaddr,
qdf_dma_addr_t *rx2_rdy_ring_base_paddr,
uint32_t *rx2_rdy_ring_size,
qdf_dma_addr_t *rx2_proc_done_idx_paddr,
void **rx2_proc_done_idx_vaddr)
{
return 0;
}
/**
* htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell address
* @pdev: handle to the HTT instance

View file

@@ -107,28 +107,28 @@ QDF_STATUS ol_txrx_ipa_uc_get_resource(struct cdp_pdev *ppdev)
{
ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)ppdev;
struct ol_txrx_ipa_resources *ipa_res = &pdev->ipa_resource;
qdf_device_t osdev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
htt_ipa_uc_get_resource(pdev->htt_pdev,
&ipa_res->ce_sr_base_paddr,
&ipa_res->ce_sr,
&ipa_res->tx_comp_ring,
&ipa_res->rx_rdy_ring,
&ipa_res->rx2_rdy_ring,
&ipa_res->rx_proc_done_idx,
&ipa_res->rx2_proc_done_idx,
&ipa_res->ce_sr_ring_size,
&ipa_res->ce_reg_paddr,
&ipa_res->tx_comp_ring_base_paddr,
&ipa_res->tx_comp_ring_size,
&ipa_res->tx_num_alloc_buffer,
&ipa_res->rx_rdy_ring_base_paddr,
&ipa_res->rx_rdy_ring_size,
&ipa_res->rx_proc_done_idx_paddr,
&ipa_res->rx_proc_done_idx_vaddr,
&ipa_res->rx2_rdy_ring_base_paddr,
&ipa_res->rx2_rdy_ring_size,
&ipa_res->rx2_proc_done_idx_paddr,
&ipa_res->rx2_proc_done_idx_vaddr);
&ipa_res->tx_num_alloc_buffer);
if ((0 == ipa_res->ce_sr_base_paddr) ||
(0 == ipa_res->tx_comp_ring_base_paddr) ||
(0 == ipa_res->rx_rdy_ring_base_paddr)
if ((0 == qdf_mem_get_dma_addr(osdev,
&ipa_res->ce_sr->mem_info)) ||
(0 == qdf_mem_get_dma_addr(osdev,
&ipa_res->tx_comp_ring->mem_info)) ||
(0 == qdf_mem_get_dma_addr(osdev,
&ipa_res->rx_rdy_ring->mem_info))
#if defined(QCA_WIFI_3_0) && defined(CONFIG_IPA3)
|| (0 == ipa_res->rx2_rdy_ring_base_paddr)
|| (0 == qdf_mem_get_dma_addr(osdev,
&ipa_res->rx2_rdy_ring->mem_info))
#endif
)
return QDF_STATUS_E_FAILURE;
@@ -152,12 +152,12 @@ QDF_STATUS ol_txrx_ipa_uc_set_doorbell_paddr(struct cdp_pdev *ppdev)
int ret;
ret = htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
ipa_res->tx_comp_doorbell_paddr,
ipa_res->rx_ready_doorbell_paddr);
ipa_res->tx_comp_doorbell_dmaaddr,
ipa_res->rx_ready_doorbell_dmaaddr);
if (ret) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"htt_ipa_uc_set_doorbell_paddr fail: %d", ret);
"htt_ipa_uc_set_doorbell_dmaaddr fail: %d", ret);
return QDF_STATUS_E_FAILURE;
}
@@ -303,6 +303,145 @@ static inline void ol_txrx_setup_mcc_sys_pipes(
}
#endif
#ifdef ENABLE_SMMU_S1_TRANSLATION
/**
* ol_txrx_ipa_wdi_tx_smmu_params() - Config IPA TX params
* @ipa_res: IPA resources
* @tx_smmu: IPA WDI pipe setup info
*
* Return: None
*/
static inline void ol_txrx_ipa_wdi_tx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
IPA_CLIENT_WLAN1_CONS;
qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(
tx_smmu),
&ipa_res->tx_comp_ring->sgtable,
sizeof(sgtable_t));
QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
ipa_res->tx_comp_ring->mem_info.size;
qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(
tx_smmu),
&ipa_res->ce_sr->sgtable,
sizeof(sgtable_t));
QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
ipa_res->ce_sr_ring_size;
QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
ipa_res->ce_reg_paddr;
QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
ipa_res->tx_num_alloc_buffer;
QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;
}
/**
* ol_txrx_ipa_wdi_rx_smmu_params() - Config IPA RX params
* @ipa_res: IPA resources
* @rx_smmu: IPA WDI pipe setup info
*
* Return: None
*/
static inline void ol_txrx_ipa_wdi_rx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu)
{
QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
IPA_CLIENT_WLAN1_PROD;
qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(
rx_smmu),
&ipa_res->rx_rdy_ring->sgtable,
sizeof(sgtable_t));
QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
ipa_res->rx_rdy_ring->mem_info.size;
QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
ipa_res->rx_proc_done_idx->mem_info.pa;
qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(
rx_smmu),
&ipa_res->rx2_rdy_ring->sgtable,
sizeof(sgtable_t));
QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
ipa_res->rx2_rdy_ring->mem_info.size;
QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
ipa_res->rx2_proc_done_idx->mem_info.pa;
QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) = 0;
}
#else
static inline void ol_txrx_ipa_wdi_tx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
}
static inline void ol_txrx_ipa_wdi_rx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu)
{
}
#endif
/**
* ol_txrx_ipa_wdi_tx_params() - Config IPA TX params
* @ipa_res: IPA resources
* @tx: IPA WDI pipe setup info
*
* Return: None
*/
static inline void ol_txrx_ipa_wdi_tx_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_pipe_setup_info_t *tx)
{
qdf_device_t osdev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
qdf_mem_get_dma_addr(osdev,
&ipa_res->tx_comp_ring->mem_info);
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
ipa_res->tx_comp_ring->mem_info.size;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
qdf_mem_get_dma_addr(osdev,
&ipa_res->ce_sr->mem_info);
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
ipa_res->ce_sr_ring_size;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
ipa_res->ce_reg_paddr;
QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
ipa_res->tx_num_alloc_buffer;
QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
}
/**
* ol_txrx_ipa_wdi_rx_params() - Config IPA RX params
* @ipa_res: IPA resources
* @rx: IPA WDI pipe setup info
*
* Return: None
*/
static inline void ol_txrx_ipa_wdi_rx_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_pipe_setup_info_t *rx)
{
QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
ipa_res->rx_rdy_ring->mem_info.pa;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
ipa_res->rx_rdy_ring->mem_info.size;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
ipa_res->rx_proc_done_idx->mem_info.pa;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
ipa_res->rx2_rdy_ring->mem_info.pa;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
ipa_res->rx2_rdy_ring->mem_info.size;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
ipa_res->rx2_proc_done_idx->mem_info.pa;
QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = 0;
}
/**
* ol_txrx_ipa_setup() - Setup and connect IPA pipes
* @pdev: handle to the device instance
@@ -336,6 +475,8 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu;
qdf_ipa_wdi_conn_in_params_t pipe_in;
qdf_ipa_wdi_conn_out_params_t pipe_out;
qdf_device_t osdev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
uint32_t tx_comp_db_dmaaddr = 0, rx_rdy_db_dmaaddr = 0;
int ret;
qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
@@ -343,18 +484,15 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
qdf_mem_zero(&pipe_in, sizeof(pipe_in));
qdf_mem_zero(&pipe_out, sizeof(pipe_out));
if (is_smmu_enabled)
QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
else
QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;
ol_txrx_setup_mcc_sys_pipes(sys_in, &pipe_in);
/* TX PIPE */
if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
if (is_smmu_enabled) {
QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = true;
tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(&pipe_in);
tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
} else {
QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in) = false;
tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
}
@@ -364,37 +502,20 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 1;
QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) =
OL_TXRX_IPA_UC_WLAN_8023_HDR_SIZE;
OL_TXRX_IPA_UC_WLAN_8023_HDR_SIZE;
QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
/* TODO: SMMU implementation on CLD_3.2 */
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: SMMU is not implementation on host", __func__);
return QDF_STATUS_E_FAILURE;
} else {
QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
ipa_res->tx_comp_ring_base_paddr;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
ipa_res->tx_comp_ring_size * sizeof(qdf_dma_addr_t);
if (is_smmu_enabled)
ol_txrx_ipa_wdi_tx_smmu_params(ipa_res, tx_smmu);
else
ol_txrx_ipa_wdi_tx_params(ipa_res, tx);
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
ipa_res->ce_sr_base_paddr;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
ipa_res->ce_sr_ring_size;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
ipa_res->ce_reg_paddr;
QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
ipa_res->tx_num_alloc_buffer;
QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
}
/* RX PIPE */
if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
if (is_smmu_enabled) {
rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(&pipe_in);
rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx_smmu);
} else {
rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
@@ -405,33 +526,16 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) =
OL_TXRX_IPA_UC_WLAN_8023_HDR_SIZE;
OL_TXRX_IPA_UC_WLAN_8023_HDR_SIZE;
QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(&pipe_in)) {
/* TODO: SMMU implementation on CLD_3.2 */
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: SMMU is not implementation on host", __func__);
return QDF_STATUS_E_FAILURE;
} else {
QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
ipa_res->rx_rdy_ring_base_paddr;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
ipa_res->rx_rdy_ring_size;
QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
ipa_res->rx_proc_done_idx_paddr;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
ipa_res->rx2_rdy_ring_base_paddr;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
ipa_res->rx2_rdy_ring_size;
QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
ipa_res->rx2_proc_done_idx_paddr;
QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = 0;
}
if (is_smmu_enabled)
ol_txrx_ipa_wdi_rx_smmu_params(ipa_res, rx_smmu);
else
ol_txrx_ipa_wdi_rx_params(ipa_res, rx);
QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
@@ -447,16 +551,27 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
/* IPA uC Doorbell registers */
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
__func__,
(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
(unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
"%s: Tx DB PA=0x%x, Rx DB PA=0x%x", __func__,
(unsigned int)
QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
(unsigned int)
QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
ipa_res->tx_comp_doorbell_paddr =
ipa_res->tx_comp_doorbell_dmaaddr =
QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
ipa_res->rx_ready_doorbell_paddr =
ipa_res->rx_ready_doorbell_dmaaddr =
QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
if (is_smmu_enabled) {
pld_smmu_map(osdev->dev, ipa_res->tx_comp_doorbell_dmaaddr,
&tx_comp_db_dmaaddr, sizeof(uint32_t));
ipa_res->tx_comp_doorbell_dmaaddr = tx_comp_db_dmaaddr;
pld_smmu_map(osdev->dev, ipa_res->rx_ready_doorbell_dmaaddr,
&rx_rdy_db_dmaaddr, sizeof(uint32_t));
ipa_res->rx_ready_doorbell_dmaaddr = rx_rdy_db_dmaaddr;
}
return QDF_STATUS_SUCCESS;
}
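
Worth noting in the block above: IPA reports its doorbell registers as physical addresses, but with SMMU S1 enabled the WLAN device must write through its own IOMMU, so each doorbell PA is mapped into WLAN's IOVA space and the IOVA overwrites the stored address. In isolation, with the pld_smmu_map() signature as used in this patch (dev, paddr, &iova_out, size):

    uint32_t db_iova = 0;

    pld_smmu_map(osdev->dev, ipa_res->tx_comp_doorbell_dmaaddr,
                 &db_iova, sizeof(uint32_t));
    ipa_res->tx_comp_doorbell_dmaaddr = db_iova;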
@@ -628,6 +743,133 @@ QDF_STATUS ol_txrx_ipa_disable_pipes(struct cdp_pdev *ppdev)
#else /* CONFIG_IPA_WDI_UNIFIED_API */
#ifdef ENABLE_SMMU_S1_TRANSLATION
/**
* ol_txrx_ipa_tx_smmu_params() - Config IPA TX params
* @ipa_res: IPA resources
* @pipe_in: IPA WDI TX pipe params
*
* Return: None
*/
static inline void ol_txrx_ipa_tx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_in_params_t *pipe_in)
{
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: SMMU Enabled", __func__);
QDF_IPA_PIPE_IN_SMMU_ENABLED(pipe_in) = true;
qdf_mem_copy(&QDF_IPA_PIPE_IN_DL_SMMU_COMP_RING(pipe_in),
&ipa_res->tx_comp_ring->sgtable,
sizeof(sgtable_t));
QDF_IPA_PIPE_IN_DL_SMMU_COMP_RING_SIZE(pipe_in) =
ipa_res->tx_comp_ring->mem_info.size;
qdf_mem_copy(&QDF_IPA_PIPE_IN_DL_SMMU_CE_RING(pipe_in),
&ipa_res->ce_sr->sgtable,
sizeof(sgtable_t));
QDF_IPA_PIPE_IN_DL_SMMU_CE_DOOR_BELL_PA(pipe_in) =
ipa_res->ce_reg_paddr;
QDF_IPA_PIPE_IN_DL_SMMU_CE_RING_SIZE(pipe_in) =
ipa_res->ce_sr_ring_size;
QDF_IPA_PIPE_IN_DL_SMMU_NUM_TX_BUFFERS(pipe_in) =
ipa_res->tx_num_alloc_buffer;
}
/**
* ol_txrx_ipa_rx_smmu_params() - Config IPA TX params
* @ipa_res: IPA resources
* @pipe_in: IPA WDI TX pipe params
*
* Return: None
*/
static inline void ol_txrx_ipa_rx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_in_params_t *pipe_in)
{
qdf_mem_copy(&QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING(pipe_in),
&ipa_res->rx_rdy_ring->sgtable,
sizeof(sgtable_t));
QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_SIZE(pipe_in) =
ipa_res->rx_rdy_ring->mem_info.size;
QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_RP_PA(pipe_in) =
ipa_res->rx_proc_done_idx->mem_info.pa;
QDF_IPA_PIPE_IN_UL_SMMU_RDY_RING_RP_VA(pipe_in) =
ipa_res->rx_proc_done_idx->vaddr;
qdf_mem_copy(&QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING(pipe_in),
&ipa_res->rx2_rdy_ring->sgtable,
sizeof(sgtable_t));
QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_SIZE(pipe_in) =
ipa_res->rx2_rdy_ring->mem_info.size;
QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_WP_PA(pipe_in) =
ipa_res->rx2_proc_done_idx->mem_info.pa;
QDF_IPA_PIPE_IN_UL_SMMU_RDY_COMP_RING_WP_VA(pipe_in) =
ipa_res->rx2_proc_done_idx->vaddr;
}
#else
static inline void ol_txrx_ipa_tx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_in_params_t *pipe_in)
{
}
static inline void ol_txrx_ipa_rx_smmu_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_in_params_t *pipe_in)
{
}
#endif
/**
* ol_txrx_ipa_tx_params() - Config IPA TX params
* @ipa_res: IPA resources
* @pipe_in: IPA WDI TX pipe params
*
* Return: None
*/
static inline void ol_txrx_ipa_tx_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_in_params_t *pipe_in)
{
qdf_device_t osdev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
QDF_IPA_PIPE_IN_DL_COMP_RING_BASE_PA(pipe_in) =
qdf_mem_get_dma_addr(osdev,
&ipa_res->tx_comp_ring->mem_info);
QDF_IPA_PIPE_IN_DL_COMP_RING_SIZE(pipe_in) =
ipa_res->tx_comp_ring->mem_info.size;
QDF_IPA_PIPE_IN_DL_CE_RING_BASE_PA(pipe_in) =
qdf_mem_get_dma_addr(osdev,
&ipa_res->ce_sr->mem_info);
QDF_IPA_PIPE_IN_DL_CE_DOOR_BELL_PA(pipe_in) =
ipa_res->ce_reg_paddr;
QDF_IPA_PIPE_IN_DL_CE_RING_SIZE(pipe_in) =
ipa_res->ce_sr_ring_size;
QDF_IPA_PIPE_IN_DL_NUM_TX_BUFFERS(pipe_in) =
ipa_res->tx_num_alloc_buffer;
}
/**
* ol_txrx_ipa_rx_params() - Config IPA RX params
* @ipa_res: IPA resources
* @pipe_in: IPA WDI RX pipe params
*
* Return: None
*/
static inline void ol_txrx_ipa_rx_params(
struct ol_txrx_ipa_resources *ipa_res,
qdf_ipa_wdi_in_params_t *pipe_in)
{
QDF_IPA_PIPE_IN_UL_RDY_RING_BASE_PA(pipe_in) =
ipa_res->rx_rdy_ring->mem_info.pa;
QDF_IPA_PIPE_IN_UL_RDY_RING_SIZE(pipe_in) =
ipa_res->rx_rdy_ring->mem_info.size;
QDF_IPA_PIPE_IN_UL_RDY_RING_RP_PA(pipe_in) =
ipa_res->rx_proc_done_idx->mem_info.pa;
OL_TXRX_IPA_WDI2_SET(pipe_in, ipa_res,
cds_get_context(QDF_MODULE_ID_QDF_DEVICE));
}
/**
* ol_txrx_ipa_setup() - Setup and connect IPA pipes
* @pdev: handle to the device instance
@@ -649,16 +891,17 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
uint32_t *p_rx_pipe_handle)
{
ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)ppdev;
qdf_device_t osdev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
struct ol_txrx_ipa_resources *ipa_res = &pdev->ipa_resource;
qdf_ipa_wdi_in_params_t pipe_in;
qdf_ipa_wdi_out_params_t pipe_out;
uint32_t tx_comp_db_dmaaddr = 0, rx_rdy_db_dmaaddr = 0;
int ret;
qdf_mem_zero(&pipe_in, sizeof(pipe_in));
qdf_mem_zero(&pipe_out, sizeof(pipe_out));
/* TX PIPE */
QDF_IPA_PIPE_IN_NAT_EN(&pipe_in) = IPA_BYPASS_NAT;
QDF_IPA_PIPE_IN_HDR_LEN(&pipe_in) = OL_TXRX_IPA_UC_WLAN_TX_HDR_LEN;
@@ -679,16 +922,10 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
QDF_IPA_PIPE_IN_KEEP_IPA_AWAKE(&pipe_in) = true;
}
QDF_IPA_PIPE_IN_DL_COMP_RING_BASE_PA(&pipe_in) =
ipa_res->tx_comp_ring_base_paddr;
QDF_IPA_PIPE_IN_DL_COMP_RING_SIZE(&pipe_in) =
ipa_res->tx_comp_ring_size * sizeof(qdf_dma_addr_t);
QDF_IPA_PIPE_IN_DL_CE_RING_BASE_PA(&pipe_in) =
ipa_res->ce_sr_base_paddr;
QDF_IPA_PIPE_IN_DL_CE_DOOR_BELL_PA(&pipe_in) = ipa_res->ce_reg_paddr;
QDF_IPA_PIPE_IN_DL_CE_RING_SIZE(&pipe_in) = ipa_res->ce_sr_ring_size;
QDF_IPA_PIPE_IN_DL_NUM_TX_BUFFERS(&pipe_in) =
ipa_res->tx_num_alloc_buffer;
if (qdf_mem_smmu_s1_enabled(osdev))
ol_txrx_ipa_tx_smmu_params(ipa_res, &pipe_in);
else
ol_txrx_ipa_tx_params(ipa_res, &pipe_in);
/* Connect WDI IPA PIPE */
ret = qdf_ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
@@ -703,27 +940,11 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
"%s CONS DB pipe out 0x%x TX PIPE Handle 0x%x", __func__,
(unsigned int)QDF_IPA_PIPE_OUT_UC_DOOR_BELL_PA(&pipe_out),
pipe_out.clnt_hdl);
ipa_res->tx_comp_doorbell_paddr =
ipa_res->tx_comp_doorbell_dmaaddr =
QDF_IPA_PIPE_OUT_UC_DOOR_BELL_PA(&pipe_out);
/* WLAN TX PIPE Handle */
ipa_res->tx_pipe_handle = QDF_IPA_PIPE_OUT_CLNT_HDL(&pipe_out);
*p_tx_pipe_handle = QDF_IPA_PIPE_OUT_CLNT_HDL(&pipe_out);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"TX: %s 0x%x, %s %d, %s 0x%x, %s 0x%x, %s %d, %sNB %d, %s 0x%x",
"comp_ring_base_pa",
(unsigned int)QDF_IPA_PIPE_IN_DL_COMP_RING_BASE_PA(&pipe_in),
"comp_ring_size",
QDF_IPA_PIPE_IN_DL_COMP_RING_SIZE(&pipe_in),
"ce_ring_base_pa",
(unsigned int)QDF_IPA_PIPE_IN_DL_CE_RING_BASE_PA(&pipe_in),
"ce_door_bell_pa",
(unsigned int)QDF_IPA_PIPE_IN_DL_CE_DOOR_BELL_PA(&pipe_in),
"ce_ring_size",
QDF_IPA_PIPE_IN_DL_CE_RING_SIZE(&pipe_in),
"num_tx_buffers",
QDF_IPA_PIPE_IN_DL_NUM_TX_BUFFERS(&pipe_in),
"tx_comp_doorbell_paddr",
(unsigned int)ipa_res->tx_comp_doorbell_paddr);
/* RX PIPE */
QDF_IPA_PIPE_IN_NAT_EN(&pipe_in) = IPA_BYPASS_NAT;
@@ -741,12 +962,10 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
QDF_IPA_PIPE_IN_KEEP_IPA_AWAKE(&pipe_in) = true;
}
QDF_IPA_PIPE_IN_UL_RDY_RING_BASE_PA(&pipe_in) =
ipa_res->rx_rdy_ring_base_paddr;
QDF_IPA_PIPE_IN_UL_RDY_RING_SIZE(&pipe_in) = ipa_res->rx_rdy_ring_size;
QDF_IPA_PIPE_IN_UL_RDY_RING_RP_PA(&pipe_in) =
ipa_res->rx_proc_done_idx_paddr;
OL_TXRX_IPA_WDI2_SET(pipe_in, ipa_res);
if (qdf_mem_smmu_s1_enabled(osdev))
ol_txrx_ipa_rx_smmu_params(ipa_res, &pipe_in);
else
ol_txrx_ipa_rx_params(ipa_res, &pipe_in);
#ifdef FEATURE_METERING
QDF_IPA_PIPE_IN_WDI_NOTIFY(&pipe_in) = ipa_wdi_meter_notifier_cb;
@@ -758,20 +977,20 @@ QDF_STATUS ol_txrx_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
"ipa_connect_wdi_pipe: Rx pipe setup failed: ret=%d", ret);
return QDF_STATUS_E_FAILURE;
}
ipa_res->rx_ready_doorbell_paddr =
ipa_res->rx_ready_doorbell_dmaaddr =
QDF_IPA_PIPE_OUT_UC_DOOR_BELL_PA(&pipe_out);
ipa_res->rx_pipe_handle = QDF_IPA_PIPE_OUT_CLNT_HDL(&pipe_out);
*p_rx_pipe_handle = QDF_IPA_PIPE_OUT_CLNT_HDL(&pipe_out);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"RX: %s 0x%x, %s %d, %s 0x%x, %s 0x%x",
"rdy_ring_base_pa",
(unsigned int)QDF_IPA_PIPE_IN_UL_RDY_RING_BASE_PA(&pipe_in),
"rdy_ring_size",
QDF_IPA_PIPE_IN_UL_RDY_RING_SIZE(&pipe_in),
"rdy_ring_rp_pa",
(unsigned int)QDF_IPA_PIPE_IN_UL_RDY_RING_RP_PA(&pipe_in),
"rx_ready_doorbell_paddr",
(unsigned int)ipa_res->rx_ready_doorbell_paddr);
if (qdf_mem_smmu_s1_enabled(osdev)) {
pld_smmu_map(osdev->dev, ipa_res->tx_comp_doorbell_dmaaddr,
&tx_comp_db_dmaaddr, sizeof(uint32_t));
ipa_res->tx_comp_doorbell_dmaaddr = tx_comp_db_dmaaddr;
pld_smmu_map(osdev->dev, ipa_res->rx_ready_doorbell_dmaaddr,
&rx_rdy_db_dmaaddr, sizeof(uint32_t));
ipa_res->rx_ready_doorbell_dmaaddr = rx_rdy_db_dmaaddr;
}
return QDF_STATUS_SUCCESS;
}

View file

@@ -84,22 +84,24 @@ struct ol_txrx_ipa_uc_rx_hdr {
(OL_TXRX_IPA_WLAN_FRAG_HEADER + OL_TXRX_IPA_WLAN_IPA_HEADER)
#if defined(QCA_WIFI_3_0) && defined(CONFIG_IPA3)
#define OL_TXRX_IPA_WDI2_SET(pipe_in, ipa_res) \
#define OL_TXRX_IPA_WDI2_SET(pipe_in, ipa_res, osdev) \
do { \
pipe_in.u.ul.rdy_ring_rp_va = \
ipa_res->rx_proc_done_idx_vaddr; \
pipe_in.u.ul.rdy_comp_ring_base_pa = \
ipa_res->rx2_rdy_ring_base_paddr;\
pipe_in.u.ul.rdy_comp_ring_size = \
ipa_res->rx2_rdy_ring_size; \
pipe_in.u.ul.rdy_comp_ring_wp_pa = \
ipa_res->rx2_proc_done_idx_paddr; \
pipe_in.u.ul.rdy_comp_ring_wp_va = \
ipa_res->rx2_proc_done_idx_vaddr; \
QDF_IPA_PIPE_IN_UL_RDY_RING_RP_VA(pipe_in) = \
ipa_res->rx_proc_done_idx->vaddr; \
QDF_IPA_PIPE_IN_UL_RDY_COMP_RING(pipe_in) = \
qdf_mem_get_dma_addr(osdev, \
&ipa_res->rx2_rdy_ring->mem_info);\
QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_SIZE(pipe_in) = \
ipa_res->rx2_rdy_ring->mem_info.size; \
QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_WP_PA(pipe_in) = \
qdf_mem_get_dma_addr(osdev, \
&ipa_res->rx2_proc_done_idx->mem_info); \
QDF_IPA_PIPE_IN_UL_RDY_COMP_RING_WP_VA(pipe_in) = \
ipa_res->rx2_proc_done_idx->vaddr; \
} while (0)
#else
/* Do nothing */
#define OL_TXRX_IPA_WDI2_SET(pipe_in, ipa_res)
#define OL_TXRX_IPA_WDI2_SET(pipe_in, ipa_res, osdev)
#endif /* IPA3 */
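
Call sites must now pass the qdf device so the macro can resolve IOVA versus physical address, as in ol_txrx_ipa_rx_params() earlier in this diff:

    OL_TXRX_IPA_WDI2_SET(pipe_in, ipa_res,
                         cds_get_context(QDF_MODULE_ID_QDF_DEVICE));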
QDF_STATUS ol_txrx_ipa_uc_get_resource(struct cdp_pdev *pdev);

View file

@@ -950,9 +950,7 @@ static inline void hdd_ipa_wdi_get_wdi_version(struct hdd_ipa_priv *hdd_ipa)
static inline bool hdd_ipa_wdi_is_smmu_enabled(struct hdd_ipa_priv *hdd_ipa,
qdf_device_t osdev)
{
/* TODO: Need to check if SMMU is supported on cld_3.2 */
/* return hdd_ipa->is_smmu_enabled && qdf_mem_smmu_s1_enabled(osdev); */
return 0;
return hdd_ipa->is_smmu_enabled && qdf_mem_smmu_s1_enabled(osdev);
}
static inline QDF_STATUS hdd_ipa_wdi_setup(struct hdd_ipa_priv *hdd_ipa)
@@ -1103,9 +1101,7 @@ static inline void hdd_ipa_wdi_get_wdi_version(struct hdd_ipa_priv *hdd_ipa)
static inline int hdd_ipa_wdi_is_smmu_enabled(struct hdd_ipa_priv *hdd_ipa,
qdf_device_t osdev)
{
/* TODO: Need to check if SMMU is supported on cld_3.2 */
/* return qdf_mem_smmu_s1_enabled(osdev); */
return 0;
return qdf_mem_smmu_s1_enabled(osdev);
}
static inline QDF_STATUS hdd_ipa_wdi_setup(struct hdd_ipa_priv *hdd_ipa)
@@ -5917,7 +5913,7 @@ static int __hdd_ipa_wlan_evt(struct hdd_adapter *adapter, uint8_t sta_id,
}
hdd_ipa->stats.num_send_msg++;
HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "sap_num_connected_sta=%d",
HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "%d",
hdd_ipa->sap_num_connected_sta);
return ret;
@@ -6381,7 +6377,8 @@ QDF_STATUS hdd_ipa_cleanup(struct hdd_context *hdd_ctx)
int hdd_ipa_uc_smmu_map(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
{
HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "Map: %d Num_buf: %d", map, num_buf);
HDD_IPA_DP_LOG(QDF_TRACE_LEVEL_DEBUG, "Map: %d Num_buf: %d",
map, num_buf);
if (!num_buf) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "No buffers to map/unmap");