@@ -135,10 +135,15 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
         struct htt_rx_hash_bucket **hash_table;
         struct htt_list_node *list_iter = NULL;
         qdf_mem_info_t mem_map_table = {0};
+        bool ipa_smmu = false;
 
         if (NULL == pdev->rx_ring.hash_table)
                 return;
 
+        if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+            pdev->rx_ring.smmu_map)
+                ipa_smmu = true;
+
         qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
         hash_table = pdev->rx_ring.hash_table;
         pdev->rx_ring.hash_table = NULL;
@@ -153,8 +158,7 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
                                                              pdev->rx_ring.
                                                              listnode_offset);
                         if (hash_entry->netbuf) {
-                                if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-                                    pdev->is_ipa_uc_enabled) {
+                                if (ipa_smmu) {
                                         qdf_update_mem_map_table(pdev->osdev,
                                                 &mem_map_table,
                                                 QDF_NBUF_CB_PADDR(
@@ -395,9 +399,14 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
         int filled = 0;
         int debt_served = 0;
         qdf_mem_info_t mem_map_table = {0};
+        bool ipa_smmu = false;
 
         idx = *(pdev->rx_ring.alloc_idx.vaddr);
 
+        if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+            pdev->rx_ring.smmu_map)
+                ipa_smmu = true;
+
         if ((idx < 0) || (idx > pdev->rx_ring.size_mask) ||
             (num > pdev->rx_ring.size)) {
                 QDF_TRACE(QDF_MODULE_ID_HTT,
@@ -494,8 +503,7 @@ moretofill:
                 pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
         }
 
-        if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-            pdev->is_ipa_uc_enabled) {
+        if (ipa_smmu) {
                 qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
                                          paddr, HTT_RX_BUF_SIZE);
                 cds_smmu_map_unmap(true, 1, &mem_map_table);
@@ -633,10 +641,15 @@ static inline unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
 
 void htt_rx_detach(struct htt_pdev_t *pdev)
 {
+        bool ipa_smmu = false;
         qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
         qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
         htt_rx_dbg_rxbuf_deinit(pdev);
 
+        if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+            pdev->rx_ring.smmu_map)
+                ipa_smmu = true;
+
         if (pdev->cfg.is_full_reorder_offload) {
                 qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
                                         sizeof(uint32_t),
@@ -649,30 +662,18 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
                 htt_rx_hash_deinit(pdev);
         } else {
                 int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
-                qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
-                uint32_t num_unmapped = 0;
-
-                if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-                    pdev->is_ipa_uc_enabled) {
-                        mem_map_table = qdf_mem_map_table_alloc(
-                                                pdev->rx_ring.fill_level);
-                        if (!mem_map_table) {
-                                qdf_print("%s: Failed to allocate memory for mem map table\n",
-                                          __func__);
-                                return;
-                        }
-                        mem_info = mem_map_table;
-                }
+                qdf_mem_info_t mem_map_table = {0};
+
                 while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
-                        if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-                            pdev->is_ipa_uc_enabled) {
-                                qdf_update_mem_map_table(pdev->osdev, mem_info,
+                        if (ipa_smmu) {
+                                qdf_update_mem_map_table(pdev->osdev,
+                                                         &mem_map_table,
                                                          QDF_NBUF_CB_PADDR(
-                                                                pdev->rx_ring.buf.netbufs_ring[
-                                                                        sw_rd_idx]),
+                                                                pdev->rx_ring.buf.
+                                                                netbufs_ring[sw_rd_idx]),
                                                          HTT_RX_BUF_SIZE);
-                                mem_info++;
-                                num_unmapped++;
+                                cds_smmu_map_unmap(false, 1,
+                                                   &mem_map_table);
                         }
 #ifdef DEBUG_DMA_DONE
                         qdf_nbuf_unmap(pdev->osdev,
@@ -692,13 +693,6 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
                 }
                 qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
 
-                if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-                    pdev->is_ipa_uc_enabled) {
-                        if (num_unmapped)
-                                cds_smmu_map_unmap(false, num_unmapped,
-                                                   mem_map_table);
-                        qdf_mem_free(mem_map_table);
-                }
         }
 
         qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
@@ -1651,9 +1645,9 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
         struct htt_host_rx_desc_base *rx_desc;
         enum rx_pkt_fate status = RX_PKT_FATE_SUCCESS;
         qdf_dma_addr_t paddr;
-        qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
-        uint32_t num_unmapped = 0;
+        qdf_mem_info_t mem_map_table = {0};
         int ret = 1;
+        bool ipa_smmu = false;
 
         HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
 
@@ -1669,15 +1663,11 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
         /* Get the total number of MSDUs */
         msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
         HTT_RX_CHECK_MSDU_COUNT(msdu_count);
-        if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
-                mem_map_table = qdf_mem_map_table_alloc(msdu_count);
-                if (!mem_map_table) {
-                        qdf_print("%s: Failed to allocate memory for mem map table\n",
-                                  __func__);
-                        return 0;
-                }
-                mem_info = mem_map_table;
-        }
+
+        if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+            pdev->rx_ring.smmu_map)
+                ipa_smmu = true;
+
         ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
         htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
 
@@ -1688,7 +1678,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
                                       msg_word);
                 *head_msdu = *tail_msdu = NULL;
                 ret = 0;
-                goto free_mem_map_table;
+                goto end;
         }
 
         paddr = htt_rx_in_ord_paddr_get(msg_word);
@@ -1699,17 +1689,15 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
                 *tail_msdu = NULL;
                 pdev->rx_ring.pop_fail_cnt++;
                 ret = 0;
-                goto free_mem_map_table;
+                goto end;
         }
 
         while (msdu_count > 0) {
-                if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-                    pdev->is_ipa_uc_enabled) {
-                        qdf_update_mem_map_table(pdev->osdev, mem_info,
+                if (ipa_smmu) {
+                        qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
                                                  QDF_NBUF_CB_PADDR(msdu),
                                                  HTT_RX_BUF_SIZE);
-                        mem_info++;
-                        num_unmapped++;
+                        cds_smmu_map_unmap(false, 1, &mem_map_table);
                 }
 
                 /*
|
|
|
if (!prev) {
|
|
|
*head_msdu = *tail_msdu = NULL;
|
|
|
ret = 0;
|
|
|
- goto free_mem_map_table;
|
|
|
+ goto end;
|
|
|
}
|
|
|
*tail_msdu = prev;
|
|
|
qdf_nbuf_set_next(prev, NULL);
|
|
|
- goto free_mem_map_table;
|
|
|
+ goto end;
|
|
|
} else { /* if this is not the last msdu */
|
|
|
/* get the next msdu */
|
|
|
msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
|
|
@@ -1803,7 +1791,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
                                 *tail_msdu = NULL;
                                 pdev->rx_ring.pop_fail_cnt++;
                                 ret = 0;
-                                goto free_mem_map_table;
+                                goto end;
                         }
 
                         /* if this is not the first msdu, update the
@@ -1836,7 +1824,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
                                 *tail_msdu = NULL;
                                 pdev->rx_ring.pop_fail_cnt++;
                                 ret = 0;
-                                goto free_mem_map_table;
+                                goto end;
                         }
                         qdf_nbuf_set_next(msdu, next);
                         prev = msdu;
@@ -1847,13 +1835,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
                 }
         }
 
-free_mem_map_table:
-        if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
-                if (num_unmapped)
-                        cds_smmu_map_unmap(false, num_unmapped,
-                                           mem_map_table);
-                qdf_mem_free(mem_map_table);
-        }
+end:
         return ret;
 }
 #endif
@@ -2945,4 +2927,61 @@ void htt_deregister_rx_pkt_dump_callback(struct htt_pdev_t *pdev)
         }
         pdev->rx_pkt_dump_cb = NULL;
 }
+
+static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
+{
+        uint32_t i;
+        struct htt_rx_hash_entry *hash_entry;
+        struct htt_rx_hash_bucket **hash_table;
+        struct htt_list_node *list_iter = NULL;
+        qdf_mem_info_t mem_map_table = {0};
+        int ret;
+
+        qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
+        hash_table = pdev->rx_ring.hash_table;
+
+        for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
+                /* Free the hash entries in hash bucket i */
+                list_iter = hash_table[i]->listhead.next;
+                while (list_iter != &hash_table[i]->listhead) {
+                        hash_entry =
+                                (struct htt_rx_hash_entry *)((char *)list_iter -
+                                                             pdev->rx_ring.
+                                                             listnode_offset);
+                        if (hash_entry->netbuf) {
+                                qdf_update_mem_map_table(pdev->osdev,
+                                                &mem_map_table,
+                                                QDF_NBUF_CB_PADDR(
+                                                        hash_entry->netbuf),
+                                                HTT_RX_BUF_SIZE);
+                                ret = cds_smmu_map_unmap(map, 1,
+                                                         &mem_map_table);
+                                if (ret)
+                                        return QDF_STATUS_E_FAILURE;
+                        }
+                        list_iter = list_iter->next;
+                }
+        }
+        qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
+
+        return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map)
+{
+        QDF_STATUS status;
+
+        if (NULL == pdev->rx_ring.hash_table)
+                return QDF_STATUS_SUCCESS;
+
+        if (!qdf_mem_smmu_s1_enabled(pdev->osdev) || !pdev->is_ipa_uc_enabled)
+                return QDF_STATUS_SUCCESS;
+
+        qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
+        pdev->rx_ring.smmu_map = map;
+        status = htt_rx_hash_smmu_map(map, pdev);
+        qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
+
+        return status;
+}
 #endif