@@ -40,6 +40,7 @@
 #include "ce_tasklet.h"
 #include "qdf_module.h"
 #include "qdf_ssr_driver_dump.h"
+#include <wbuff.h>
 
 #define CE_POLL_TIMEOUT 10 /* ms */
 
@@ -3300,6 +3301,141 @@ hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
 }
 
+#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
+
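+/* Extra wbuff pool entries granted to WMI pipes on top of the ring size */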
+#define HIF_CE_RX_NBUF_WMI_POOL_SIZE 32
+
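+/**
+ * hif_ce_rx_nbuf_alloc() - allocate an RX nbuf for a copy engine
+ * @scn: HIF context
+ * @ce_id: CE id for which the nbuf is requested
+ *
+ * Take an nbuf from the wbuff pool registered for this CE; if the
+ * pool has no free buffer, fall back to a regular qdf_nbuf
+ * allocation of the pipe's buffer size.
+ *
+ * Return: nbuf on success, NULL otherwise
+ */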
+static qdf_nbuf_t hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
+{
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
+	qdf_nbuf_t nbuf;
+
+	nbuf = wbuff_buff_get(scn->wbuff_handle, ce_id, 0, __func__,
+			      __LINE__);
+	if (!nbuf)
+		nbuf = qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz,
+				      0, 4, false);
+
+	return nbuf;
+}
+
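+/**
+ * hif_ce_rx_nbuf_free() - free an RX nbuf, returning it to wbuff if possible
+ * @nbuf: nbuf to free
+ *
+ * Offer @nbuf back to the wbuff module first; any buffer that
+ * wbuff_buff_put() hands back is released with qdf_nbuf_free().
+ *
+ * Return: none
+ */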
+static void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
+{
+	nbuf = wbuff_buff_put(nbuf);
+	if (nbuf)
+		qdf_nbuf_free(nbuf);
+}
+
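+/**
+ * hif_calc_wbuff_pool_size() - compute the wbuff pool size for a CE
+ * @scn: HIF context
+ * @ce_state: CE state for this copy engine
+ *
+ * Start from the destination ring size and add headroom for WMI
+ * pipes: the full HIF_CE_RX_NBUF_WMI_POOL_SIZE when only the WMI
+ * control service maps to this CE, or half of it when the WMI diag
+ * service is also present and shares the extra buffers.
+ *
+ * Return: pool size in buffers, 0 if @ce_state is NULL
+ */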
+static int
+hif_calc_wbuff_pool_size(struct hif_softc *scn, struct CE_state *ce_state)
+{
+	int ul_is_polled, dl_is_polled;
+	bool is_wmi_svc, wmi_diag_svc;
+	uint8_t ul_pipe, dl_pipe;
+	int pool_size;
+	int status;
+	int ce_id;
+
+	if (!ce_state)
+		return 0;
+
+	ce_id = ce_state->id;
+
+	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
+					 WMI_CONTROL_SVC,
+					 &ul_pipe, &dl_pipe,
+					 &ul_is_polled, &dl_is_polled);
+	is_wmi_svc = !status && (dl_pipe == ce_id);
+
+	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
+					 WMI_CONTROL_DIAG_SVC,
+					 &ul_pipe, &dl_pipe,
+					 &ul_is_polled, &dl_is_polled);
+	wmi_diag_svc = !status;
+
+	if (is_wmi_svc && !wmi_diag_svc)
+		pool_size = ce_state->dest_ring->nentries +
+			HIF_CE_RX_NBUF_WMI_POOL_SIZE;
+	else if (is_wmi_svc && wmi_diag_svc)
+		pool_size = ce_state->dest_ring->nentries +
+			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
+	else if (!is_wmi_svc && wmi_diag_svc && ce_id == dl_pipe)
+		pool_size = ce_state->dest_ring->nentries +
+			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
+	else
+		pool_size = ce_state->dest_ring->nentries;
+
+	return pool_size;
+}
+
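+/**
+ * hif_ce_rx_wbuff_register() - register RX CE pools with the wbuff module
+ * @scn: HIF context
+ *
+ * Build a wbuff allocation request for each RX CE, skipping CEs with
+ * no RX buffers to post and HTT data CEs when fastpath is enabled,
+ * then register the pools with wbuff. The returned handle is stored
+ * in scn->wbuff_handle.
+ *
+ * Return: none
+ */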
+static void hif_ce_rx_wbuff_register(struct hif_softc *scn)
+{
+	struct wbuff_alloc_request wbuff_alloc[CE_COUNT_MAX] = {0};
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct HIF_CE_pipe_info *pipe_info;
+	struct CE_state *ce_state;
+	int ce_id;
+
+	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
+		pipe_info = &hif_state->pipe_info[ce_id];
+		ce_state = scn->ce_id_to_state[ce_id];
+
+		if (!pipe_info->buf_sz)
+			continue;
+
+		/* Only RX CEs need WBUFF registration. recv_bufs_needed
+		 * holds a valid count for RX CEs at init time.
+		 */
+		if (!atomic_read(&pipe_info->recv_bufs_needed))
+			continue;
+
+		if (ce_is_fastpath_enabled(scn) &&
+		    ce_state->htt_rx_data)
+			continue;
+
+		wbuff_alloc[ce_id].pool_id = ce_id;
+		wbuff_alloc[ce_id].buffer_size = pipe_info->buf_sz;
+		wbuff_alloc[ce_id].pool_size =
+				hif_calc_wbuff_pool_size(scn, ce_state);
+	}
+
+	scn->wbuff_handle =
+		wbuff_module_register(wbuff_alloc, CE_COUNT_MAX, 0, 4,
+				      WBUFF_MODULE_CE_RX);
+}
+
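+/**
+ * hif_ce_rx_wbuff_deregister() - unregister the CE RX pools from wbuff
+ * @scn: HIF context
+ *
+ * Return: none
+ */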
+static void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
+{
+	wbuff_module_deregister(scn->wbuff_handle);
+	scn->wbuff_handle = NULL;
+}
+#else
+static inline qdf_nbuf_t
+hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
+{
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
+
+	return qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 0, 4, false);
+}
+
+static inline void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
+{
+	qdf_nbuf_free(nbuf);
+}
+
+static inline void hif_ce_rx_wbuff_register(struct hif_softc *scn)
+{
+}
+
+static inline void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
+{
+}
+#endif /* WLAN_FEATURE_CE_RX_BUFFER_REUSE */
+
 /**
  * hif_ce_do_recv(): send message from copy engine to upper layers
  * @msg_callbacks: structure containing callback and callback context
@@ -3322,7 +3458,7 @@ static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
 						   netbuf, pipe_info->pipe_num);
 	} else {
 		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
-		qdf_nbuf_free(netbuf);
+		hif_ce_rx_nbuf_free(netbuf);
 	}
 }
 
@@ -3350,7 +3486,7 @@ hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
 		atomic_inc(&pipe_info->recv_bufs_needed);
 		hif_post_recv_buffers_for_pipe(pipe_info);
 		if (scn->target_status == TARGET_STATUS_RESET)
-			qdf_nbuf_free(transfer_context);
+			hif_ce_rx_nbuf_free(transfer_context);
 		else
 			hif_ce_do_recv(msg_callbacks, transfer_context,
 				       nbytes, pipe_info);
@@ -3599,7 +3735,7 @@ QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
 		hif_record_ce_desc_event(scn, ce_id,
 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
 					 0, 0);
-		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
+		nbuf = hif_ce_rx_nbuf_alloc(scn, ce_id);
 		if (!nbuf) {
 			hif_post_recv_buffers_failure(pipe_info, nbuf,
 						      &pipe_info->nbuf_alloc_err_count,
@@ -3624,7 +3760,7 @@ QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
 						      &pipe_info->nbuf_dma_err_count,
 						      HIF_RX_NBUF_MAP_FAILURE,
 						      "HIF_RX_NBUF_MAP_FAILURE");
-			qdf_nbuf_free(nbuf);
+			hif_ce_rx_nbuf_free(nbuf);
 			return status;
 		}
 
@@ -3643,7 +3779,7 @@ QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
 
 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
 					      QDF_DMA_FROM_DEVICE);
-			qdf_nbuf_free(nbuf);
+			hif_ce_rx_nbuf_free(nbuf);
 			return status;
 		}
@@ -3813,6 +3949,8 @@ QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
 	if (hif_completion_thread_startup(hif_state))
 		return QDF_STATUS_E_FAILURE;
 
+	hif_ce_rx_wbuff_register(scn);
+
 	/* enable buffer cleanup */
 	hif_state->started = true;
 
@@ -3865,7 +4003,7 @@ static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
 		if (netbuf) {
 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
 					      QDF_DMA_FROM_DEVICE);
-			qdf_nbuf_free(netbuf);
+			hif_ce_rx_nbuf_free(netbuf);
 		}
 	}
 }
@@ -4002,6 +4140,7 @@ void hif_ce_stop(struct hif_softc *scn)
 	}
 
 	hif_buffer_cleanup(hif_state);
+	hif_ce_rx_wbuff_deregister(scn);
 
 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
 		struct HIF_CE_pipe_info *pipe_info;