@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -107,6 +107,22 @@ htt_get_first_packet_after_wow_wakeup(uint32_t *msg_word, qdf_nbuf_t buf)
}
}

+/**
+ * htt_rx_ring_smmu_mapped() - check whether the rx ring is SMMU mapped
+ * @pdev: HTT pdev handle
+ *
+ * Return: true if the rx ring is SMMU mapped for IPA, false otherwise.
+ */
+static inline bool htt_rx_ring_smmu_mapped(htt_pdev_handle pdev)
+{
+ if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+ pdev->is_ipa_uc_enabled &&
+ pdev->rx_ring.smmu_map)
+ return true;
+ else
+ return false;
+}
+
static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
{
int idx;
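
For reference, the new helper simply names the three-way condition (stage-1 SMMU enabled, IPA uC offload enabled, rx ring currently SMMU mapped) that later hunks remove from each open-coded call site. A minimal, compilable sketch of the same predicate, with stub types standing in for the driver's htt_pdev_handle and qdf definitions (every identifier below is illustrative, not from the driver):

#include <stdbool.h>
#include <stdio.h>

/* Stub stand-ins for the driver's pdev state; field names are illustrative. */
struct sketch_rx_ring {
        bool smmu_map;
};

struct sketch_pdev {
        bool smmu_s1_enabled;      /* models qdf_mem_smmu_s1_enabled(pdev->osdev) */
        bool is_ipa_uc_enabled;
        struct sketch_rx_ring rx_ring;
};

/* Same shape as the new helper: all three conditions must hold. */
static bool sketch_rx_ring_smmu_mapped(const struct sketch_pdev *pdev)
{
        return pdev->smmu_s1_enabled &&
               pdev->is_ipa_uc_enabled &&
               pdev->rx_ring.smmu_map;
}

int main(void)
{
        struct sketch_pdev pdev = { true, true, false };

        printf("%d\n", sketch_rx_ring_smmu_mapped(&pdev));  /* 0: ring not yet SMMU mapped */
        pdev.rx_ring.smmu_map = true;
        printf("%d\n", sketch_rx_ring_smmu_mapped(&pdev));  /* 1 */
        return 0;
}

Centralizing the predicate keeps the three conditions from drifting apart across the call sites below.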
@@ -375,14 +391,9 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
int filled = 0;
int debt_served = 0;
qdf_mem_info_t mem_map_table = {0};
- bool ipa_smmu = false;

idx = *pdev->rx_ring.alloc_idx.vaddr;

- if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
- pdev->rx_ring.smmu_map)
- ipa_smmu = true;
-
if ((idx < 0) || (idx > pdev->rx_ring.size_mask) ||
(num > pdev->rx_ring.size)) {
QDF_TRACE(QDF_MODULE_ID_HTT,
@@ -476,10 +487,12 @@ moretofill:
pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
}

- if (ipa_smmu) {
+ /* Caller already protected this function with refill_lock */
+ if (qdf_nbuf_is_rx_ipa_smmu_map(rx_netbuf)) {
qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
paddr, HTT_RX_BUF_SIZE);
- cds_smmu_map_unmap(true, 1, &mem_map_table);
+ qdf_assert_always(
+ !cds_smmu_map_unmap(true, 1, &mem_map_table));
}

pdev->rx_ring.buf.paddrs_ring[idx] = paddr_marked;
@@ -1142,6 +1155,15 @@ htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
RX_HASH_LOG(qdf_print("rx hash: paddr 0x%x netbuf %pK bucket %d\n",
paddr, netbuf, (int)i));

+ if (htt_rx_ring_smmu_mapped(pdev)) {
+ if (qdf_unlikely(qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
+ qdf_err("Already smmu mapped, nbuf: %pK",
+ netbuf);
+ qdf_assert_always(0);
+ }
+ qdf_nbuf_set_rx_ipa_smmu_map(netbuf, true);
+ }
+
HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);

@@ -1204,6 +1226,13 @@ qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev,
}
}

+ if (netbuf && htt_rx_ring_smmu_mapped(pdev)) {
+ if (qdf_unlikely(!qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
+ qdf_err("smmu not mapped nbuf: %pK", netbuf);
+ qdf_assert_always(0);
+ }
+ }
+
RX_HASH_LOG(qdf_print("rx hash: paddr 0x%llx, netbuf %pK, bucket %d\n",
(unsigned long long)paddr, netbuf, (int)i));
HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
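
Taken together, the insert and lookup hunks establish a per-buffer invariant: when the rx ring is SMMU mapped, a buffer must enter the hash table with the IPA SMMU flag clear (it is set there) and must still carry the flag when it is looked up again. A small, compilable sketch of that invariant, with a plain boolean standing in for the qdf_nbuf_is_rx_ipa_smmu_map()/qdf_nbuf_set_rx_ipa_smmu_map() accessors (identifiers below are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub rx buffer; the flag models the nbuf's IPA SMMU-map state. */
struct sketch_nbuf {
        bool ipa_smmu_mapped;
};

/* Hash-list insert: with the ring SMMU mapped, the buffer must not already
 * carry the flag; set it as the buffer goes in. */
static void sketch_hash_insert(struct sketch_nbuf *nbuf, bool ring_smmu_mapped)
{
        if (ring_smmu_mapped) {
                assert(!nbuf->ipa_smmu_mapped);   /* "Already smmu mapped" case */
                nbuf->ipa_smmu_mapped = true;
        }
}

/* Hash-list lookup: with the ring SMMU mapped, the flag must still be set. */
static void sketch_hash_lookup_check(const struct sketch_nbuf *nbuf,
                                     bool ring_smmu_mapped)
{
        if (ring_smmu_mapped)
                assert(nbuf->ipa_smmu_mapped);    /* "smmu not mapped" case */
}

int main(void)
{
        struct sketch_nbuf nbuf = { false };

        sketch_hash_insert(&nbuf, true);
        sketch_hash_lookup_check(&nbuf, true);
        printf("invariant held, nbuf mapped = %d\n", nbuf.ipa_smmu_mapped);
        return 0;
}

In the patch the violations are fatal (qdf_assert_always) rather than plain asserts, since a stale or missing mapping would otherwise surface later as an SMMU fault.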
@@ -1311,11 +1340,8 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
if (!pdev->rx_ring.hash_table)
return;

- if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
- pdev->rx_ring.smmu_map)
- ipa_smmu = true;
-
qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
+ ipa_smmu = htt_rx_ring_smmu_mapped(pdev);
hash_table = pdev->rx_ring.hash_table;
pdev->rx_ring.hash_table = NULL;
qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
@@ -1330,14 +1356,26 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
listnode_offset);
if (hash_entry->netbuf) {
if (ipa_smmu) {
+ if (qdf_unlikely(
+ !qdf_nbuf_is_rx_ipa_smmu_map(
+ hash_entry->netbuf))) {
+ qdf_err("nbuf: %pK NOT mapped",
+ hash_entry->netbuf);
+ qdf_assert_always(0);
+ }
+ qdf_nbuf_set_rx_ipa_smmu_map(
+ hash_entry->netbuf,
+ false);
qdf_update_mem_map_table(pdev->osdev,
&mem_map_table,
QDF_NBUF_CB_PADDR(
hash_entry->netbuf),
HTT_RX_BUF_SIZE);

- cds_smmu_map_unmap(false, 1,
- &mem_map_table);
+ qdf_assert_always(
+ !cds_smmu_map_unmap(
+ false, 1,
+ &mem_map_table));
}
#ifdef DEBUG_DMA_DONE
qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
@@ -1449,7 +1487,6 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
qdf_dma_addr_t paddr;
qdf_mem_info_t mem_map_table = {0};
int ret = 1;
- bool ipa_smmu = false;
struct htt_host_rx_desc_base *timestamp_rx_desc = NULL;

HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
@@ -1467,10 +1504,6 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
HTT_RX_CHECK_MSDU_COUNT(msdu_count);

- if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
- pdev->rx_ring.smmu_map)
- ipa_smmu = true;
-
ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);

@@ -1496,11 +1529,24 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
}

while (msdu_count > 0) {
- if (ipa_smmu) {
+ if (qdf_nbuf_is_rx_ipa_smmu_map(msdu)) {
+ /*
+ * The nbuf has already been detached from its hash_entry,
+ * so no parallel IPA context can access it for smmu
+ * map/unmap; the flag can therefore be updated here
+ * without holding the lock.
+ *
+ * The flag was deliberately not cleared in the netbuf_pop
+ * path, htt_rx_hash_list_lookup() (where the lock is held),
+ * so that this point can tell whether the nbuf still needs
+ * an smmu unmap or was never mapped in the first place.
+ */
+ qdf_nbuf_set_rx_ipa_smmu_map(msdu, false);
qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
QDF_NBUF_CB_PADDR(msdu),
HTT_RX_BUF_SIZE);
- cds_smmu_map_unmap(false, 1, &mem_map_table);
+ qdf_assert_always(
+ !cds_smmu_map_unmap(false, 1, &mem_map_table));
}

/*
@@ -2219,14 +2265,13 @@ fail1:
void htt_rx_detach(struct htt_pdev_t *pdev)
{
bool ipa_smmu = false;
+ qdf_nbuf_t nbuf;

qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
htt_rx_dbg_rxbuf_deinit(pdev);

- if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
- pdev->rx_ring.smmu_map)
- ipa_smmu = true;
+ ipa_smmu = htt_rx_ring_smmu_mapped(pdev);

if (pdev->cfg.is_full_reorder_offload) {
qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
@@ -2243,29 +2288,31 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
qdf_mem_info_t mem_map_table = {0};

while (sw_rd_idx != *pdev->rx_ring.alloc_idx.vaddr) {
+ nbuf = pdev->rx_ring.buf.netbufs_ring[sw_rd_idx];
if (ipa_smmu) {
+ if (qdf_unlikely(
+ !qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
+ qdf_err("smmu not mapped, nbuf: %pK",
+ nbuf);
+ qdf_assert_always(0);
+ }
+ qdf_nbuf_set_rx_ipa_smmu_map(nbuf, false);
qdf_update_mem_map_table(pdev->osdev,
&mem_map_table,
- QDF_NBUF_CB_PADDR(
- pdev->rx_ring.buf.
- netbufs_ring[sw_rd_idx]),
+ QDF_NBUF_CB_PADDR(nbuf),
HTT_RX_BUF_SIZE);
- cds_smmu_map_unmap(false, 1,
- &mem_map_table);
+ qdf_assert_always(
+ !cds_smmu_map_unmap(false, 1,
+ &mem_map_table));
}
#ifdef DEBUG_DMA_DONE
- qdf_nbuf_unmap(pdev->osdev,
- pdev->rx_ring.buf.
- netbufs_ring[sw_rd_idx],
+ qdf_nbuf_unmap(pdev->osdev, nbuf,
QDF_DMA_BIDIRECTIONAL);
#else
- qdf_nbuf_unmap(pdev->osdev,
- pdev->rx_ring.buf.
- netbufs_ring[sw_rd_idx],
+ qdf_nbuf_unmap(pdev->osdev, nbuf,
QDF_DMA_FROM_DEVICE);
#endif
- qdf_nbuf_free(pdev->rx_ring.buf.
- netbufs_ring[sw_rd_idx]);
+ qdf_nbuf_free(nbuf);
sw_rd_idx++;
sw_rd_idx &= pdev->rx_ring.size_mask;
}
@@ -2300,6 +2347,7 @@ static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
struct htt_rx_hash_bucket **hash_table;
struct htt_list_node *list_iter = NULL;
qdf_mem_info_t mem_map_table = {0};
+ qdf_nbuf_t nbuf;
int ret;

qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
@@ -2313,15 +2361,27 @@ static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
(struct htt_rx_hash_entry *)((char *)list_iter -
pdev->rx_ring.
listnode_offset);
- if (hash_entry->netbuf) {
+ nbuf = hash_entry->netbuf;
+ if (nbuf) {
+ if (qdf_unlikely(map ==
+ qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
+ qdf_err("map/unmap err:%d, nbuf:%pK",
+ map, nbuf);
+ list_iter = list_iter->next;
+ continue;
+ }
+ qdf_nbuf_set_rx_ipa_smmu_map(nbuf, map);
qdf_update_mem_map_table(pdev->osdev,
&mem_map_table,
- QDF_NBUF_CB_PADDR(
- hash_entry->netbuf),
+ QDF_NBUF_CB_PADDR(nbuf),
HTT_RX_BUF_SIZE);
ret = cds_smmu_map_unmap(map, 1,
&mem_map_table);
if (ret) {
+ qdf_nbuf_set_rx_ipa_smmu_map(nbuf,
+ !map);
+ qdf_err("map: %d failure, nbuf: %pK",
+ map, nbuf);
qdf_spin_unlock_bh(
&pdev->rx_ring.rx_hash_lock);
return QDF_STATUS_E_FAILURE;
@@ -2331,6 +2391,7 @@ static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
}
}

+ pdev->rx_ring.smmu_map = map;
qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);

return QDF_STATUS_SUCCESS;
@@ -2347,7 +2408,6 @@ QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map)
return QDF_STATUS_SUCCESS;

qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
- pdev->rx_ring.smmu_map = map;
status = htt_rx_hash_smmu_map(map, pdev);
qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
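
The final hunks change two things: the per-buffer flag is set (or cleared) before cds_smmu_map_unmap() and rolled back if that call fails, and the ring-level smmu_map flag now flips inside htt_rx_hash_smmu_map() under rx_hash_lock, only after the whole walk succeeds, instead of up front in htt_rx_update_smmu_map(). A compilable sketch of that set-then-roll-back pattern (stub types; sketch_map_one() stands in for cds_smmu_map_unmap(), and the skip-on-mismatch branch simplifies the patch's error log):

#include <stdbool.h>
#include <stdio.h>

struct sketch_nbuf {
        bool ipa_smmu_mapped;
};

/* Stand-in for cds_smmu_map_unmap(): 0 on success, non-zero on failure. */
static int sketch_map_one(struct sketch_nbuf *nbuf, bool map, bool fail)
{
        (void)nbuf;
        (void)map;
        return fail ? -1 : 0;
}

static bool ring_smmu_map;   /* models pdev->rx_ring.smmu_map */

/* Walk all buffers; flag first, roll the flag back if the map/unmap call
 * fails, and flip the ring-level flag only after every buffer succeeded. */
static int sketch_hash_smmu_map(struct sketch_nbuf *bufs, int n, bool map,
                                int fail_idx)
{
        int i;

        for (i = 0; i < n; i++) {
                if (map == bufs[i].ipa_smmu_mapped)
                        continue;                        /* already in requested state */
                bufs[i].ipa_smmu_mapped = map;
                if (sketch_map_one(&bufs[i], map, i == fail_idx)) {
                        bufs[i].ipa_smmu_mapped = !map;  /* roll back on failure */
                        return -1;
                }
        }
        ring_smmu_map = map;
        return 0;
}

int main(void)
{
        struct sketch_nbuf bufs[3] = { { false }, { false }, { false } };

        if (sketch_hash_smmu_map(bufs, 3, true, 1))
                printf("map failed: ring=%d buf1=%d\n",
                       ring_smmu_map, bufs[1].ipa_smmu_mapped);
        if (!sketch_hash_smmu_map(bufs, 3, true, -1))
                printf("map ok: ring=%d\n", ring_smmu_map);
        return 0;
}

Deferring the ring-level flag until after a successful walk keeps htt_rx_ring_smmu_mapped() from reporting "mapped" while some buffers were left unmapped by a failure partway through.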