@@ -134,23 +134,11 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 	struct htt_rx_hash_entry *hash_entry;
 	struct htt_rx_hash_bucket **hash_table;
 	struct htt_list_node *list_iter = NULL;
-	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
-	uint32_t num_unmapped = 0;
+	qdf_mem_info_t mem_map_table = {0};
 
 	if (NULL == pdev->rx_ring.hash_table)
 		return;
 
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
-		mem_map_table = qdf_mem_map_table_alloc(
-					RX_NUM_HASH_BUCKETS * RX_ENTRIES_SIZE);
-		if (!mem_map_table) {
-			qdf_print("%s: Failed to allocate memory for mem map table\n",
-				  __func__);
-			return;
-		}
-		mem_info = mem_map_table;
-	}
-
 	qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
 	hash_table = pdev->rx_ring.hash_table;
 	pdev->rx_ring.hash_table = NULL;
@@ -168,12 +156,13 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 			if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
 					pdev->is_ipa_uc_enabled) {
 				qdf_update_mem_map_table(pdev->osdev,
-					mem_info,
+					&mem_map_table,
 					QDF_NBUF_CB_PADDR(
 						hash_entry->netbuf),
 					HTT_RX_BUF_SIZE);
-				mem_info++;
-				num_unmapped++;
+
+				cds_smmu_map_unmap(false, 1,
+						   &mem_map_table);
 			}
 #ifdef DEBUG_DMA_DONE
 			qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
@@ -197,13 +186,6 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 	qdf_mem_free(hash_table);
 
 	qdf_spinlock_destroy(&(pdev->rx_ring.rx_hash_lock));
-
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
-		if (num_unmapped)
-			cds_smmu_map_unmap(false, num_unmapped,
-					   mem_map_table);
-		qdf_mem_free(mem_map_table);
-	}
 }
 #endif
 