@@ -249,15 +249,6 @@ bool hif_ce_service_should_yield(struct hif_softc *scn,
  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
-int
-ce_completed_send_next_nolock(struct CE_state *CE_state,
-                              void **per_CE_contextp,
-                              void **per_transfer_contextp,
-                              qdf_dma_addr_t *bufferp,
-                              unsigned int *nbytesp,
-                              unsigned int *transfer_idp,
-                              unsigned int *sw_idx, unsigned int *hw_idx,
-                              uint32_t *toeplitz_hash_result);
 
 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
                                    u32 ctrl_addr, unsigned int write_index)
@@ -316,7 +307,7 @@ static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
 #endif
 
 int
-ce_send_nolock(struct CE_handle *copyeng,
+ce_send_nolock_legacy(struct CE_handle *copyeng,
                void *per_transfer_context,
                qdf_dma_addr_t buffer,
                uint32_t nbytes,
@@ -421,9 +412,11 @@ ce_send(struct CE_handle *copyeng,
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
         int status;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
 
         qdf_spin_lock_bh(&CE_state->ce_index_lock);
-        status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
+        status = hif_state->ce_services->ce_send_nolock(copyeng,
+                        per_transfer_context, buffer, nbytes,
                         transfer_id, flags, user_flag);
         qdf_spin_unlock_bh(&CE_state->ce_index_lock);
 
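The hunk above is the template for the whole patch: the public entry point
keeps its name but now only dispatches through hif_state->ce_services, while
the old body survives under a _legacy suffix and is reached through the table.
A minimal, self-contained sketch of that indirection follows; every name in it
is an illustrative stand-in, not a real driver definition (the real table is
the struct ce_ops populated at the end of this patch):

#include <stdio.h>

/* Illustrative stand-ins for the driver's CE_state / ce_ops pair. */
struct fake_ce_state;

struct fake_ce_ops {
        int (*ce_send_nolock)(struct fake_ce_state *ce, const void *buf,
                              unsigned int nbytes);
};

struct fake_ce_state {
        const struct fake_ce_ops *ops;  /* plays the role of ce_services */
};

/* The old function body, renamed with a _legacy suffix. */
static int fake_send_nolock_legacy(struct fake_ce_state *ce, const void *buf,
                                   unsigned int nbytes)
{
        (void)ce;
        (void)buf;
        printf("legacy send of %u bytes\n", nbytes);
        return 0;
}

static const struct fake_ce_ops fake_legacy_ops = {
        .ce_send_nolock = fake_send_nolock_legacy,
};

/* Public entry point keeps its name and only dispatches through the
 * table -- the shape ce_send() has after this hunk. */
static int fake_ce_send(struct fake_ce_state *ce, const void *buf,
                        unsigned int nbytes)
{
        return ce->ops->ce_send_nolock(ce, buf, nbytes);
}

int main(void)
{
        struct fake_ce_state ce = { .ops = &fake_legacy_ops };

        return fake_ce_send(&ce, "hi", 2);
}

The payoff is that callers such as the HTC layer never need to know whether
the target uses legacy CE registers or SRNG-style rings; that decision is made
once, when the table is installed.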
@@ -471,6 +464,17 @@ int
 ce_sendlist_send(struct CE_handle *copyeng,
                  void *per_transfer_context,
                  struct ce_sendlist *sendlist, unsigned int transfer_id)
+{
+        struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
+
+        return hif_state->ce_services->ce_sendlist_send(copyeng,
+                        per_transfer_context, sendlist, transfer_id);
+}
+int
+ce_sendlist_send_legacy(struct CE_handle *copyeng,
+                 void *per_transfer_context,
+                 struct ce_sendlist *sendlist, unsigned int transfer_id)
 {
         int status = -ENOMEM;
         struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
@@ -497,7 +501,8 @@ ce_sendlist_send(struct CE_handle *copyeng,
                 item = &sl->item[i];
                 /* TBDXXX: Support extensible sendlist_types? */
                 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
-                status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
+                status = ce_send_nolock_legacy(copyeng,
+                                CE_SENDLIST_ITEM_CTXT,
                                 (qdf_dma_addr_t) item->data,
                                 item->u.nbytes, transfer_id,
                                 item->flags | CE_SEND_FLAG_GATHER,
@@ -508,7 +513,7 @@ ce_sendlist_send(struct CE_handle *copyeng,
                 item = &sl->item[i];
                 /* TBDXXX: Support extensible sendlist_types? */
                 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
-                status = ce_send_nolock(copyeng, per_transfer_context,
+                status = ce_send_nolock_legacy(copyeng, per_transfer_context,
                                 (qdf_dma_addr_t) item->data,
                                 item->u.nbytes,
                                 transfer_id, item->flags,
@@ -950,6 +955,7 @@ int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
 
         return QDF_STATUS_SUCCESS;
 }
+
 /**
  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
  * @copyeng: copy engine handle
@@ -961,6 +967,25 @@ int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
 int
 ce_recv_buf_enqueue(struct CE_handle *copyeng,
                     void *per_recv_context, qdf_dma_addr_t buffer)
+{
+        struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
+
+        return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
+                        per_recv_context, buffer);
+}
+
+/**
+ * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
+ * @copyeng: copy engine handle
+ * @per_recv_context: virtual address of the nbuf
+ * @buffer: physical address of the nbuf
+ *
+ * Return: 0 if the buffer is enqueued
+ */
+int
+ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
+                    void *per_recv_context, qdf_dma_addr_t buffer)
 {
         int status;
         struct CE_state *CE_state = (struct CE_state *)copyeng;
@@ -1083,7 +1108,7 @@ unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
  * The caller takes responsibility for any necessary locking.
  */
 unsigned int
-ce_send_entries_done_nolock(struct hif_softc *scn,
+ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
                             struct CE_state *CE_state)
 {
         struct CE_ring_state *src_ring = CE_state->src_ring;
@@ -1102,9 +1127,12 @@ unsigned int ce_send_entries_done(struct CE_handle *copyeng)
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
         unsigned int nentries;
+        struct hif_softc *scn = CE_state->scn;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
         qdf_spin_lock(&CE_state->ce_index_lock);
-        nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
+        nentries = hif_state->ce_services->ce_send_entries_done_nolock(
+                        CE_state->scn, CE_state);
         qdf_spin_unlock(&CE_state->ce_index_lock);
 
         return nentries;
@@ -1115,7 +1143,7 @@ unsigned int ce_send_entries_done(struct CE_handle *copyeng)
  * The caller takes responsibility for any necessary locking.
  */
 unsigned int
-ce_recv_entries_done_nolock(struct hif_softc *scn,
+ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
                             struct CE_state *CE_state)
 {
         struct CE_ring_state *dest_ring = CE_state->dest_ring;
@@ -1134,9 +1162,12 @@ unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
         unsigned int nentries;
+        struct hif_softc *scn = CE_state->scn;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
         qdf_spin_lock(&CE_state->ce_index_lock);
-        nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
+        nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
+                        CE_state->scn, CE_state);
         qdf_spin_unlock(&CE_state->ce_index_lock);
 
         return nentries;
@@ -1153,7 +1184,7 @@ void *ce_debug_cmplsn_context; /* completed send next context */
  * The caller takes responsibility for any necessary locking.
  */
 int
-ce_completed_recv_next_nolock(struct CE_state *CE_state,
+ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
                               void **per_CE_contextp,
                               void **per_transfer_contextp,
                               qdf_dma_addr_t *bufferp,
@@ -1231,22 +1262,37 @@ ce_completed_recv_next(struct CE_handle *copyeng,
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
         int status;
+        struct hif_softc *scn = CE_state->scn;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+        struct ce_ops *ce_services;
 
+        ce_services = hif_state->ce_services;
         qdf_spin_lock_bh(&CE_state->ce_index_lock);
         status =
-            ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
-                                          per_transfer_contextp, bufferp,
+            ce_services->ce_completed_recv_next_nolock(CE_state,
+                        per_CE_contextp, per_transfer_contextp, bufferp,
                         nbytesp, transfer_idp, flagsp);
         qdf_spin_unlock_bh(&CE_state->ce_index_lock);
 
         return status;
 }
 
-/* NB: Modeled after ce_completed_recv_next_nolock */
 QDF_STATUS
 ce_revoke_recv_next(struct CE_handle *copyeng,
                     void **per_CE_contextp,
                     void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
+{
+        struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
+
+        return hif_state->ce_services->ce_revoke_recv_next(copyeng,
+                        per_CE_contextp, per_transfer_contextp, bufferp);
+}
+/* NB: Modeled after ce_completed_recv_next_nolock */
+QDF_STATUS
+ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
+                    void **per_CE_contextp,
+                    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
 {
         struct CE_state *CE_state;
         struct CE_ring_state *dest_ring;
@@ -1305,7 +1351,7 @@ ce_revoke_recv_next(struct CE_handle *copyeng,
  * The caller takes responsibility for any necessary locking.
  */
 int
-ce_completed_send_next_nolock(struct CE_state *CE_state,
+ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
                               void **per_CE_contextp,
                               void **per_transfer_contextp,
                               qdf_dma_addr_t *bufferp,
@@ -1392,7 +1438,6 @@ ce_completed_send_next_nolock(struct CE_state *CE_state,
         return status;
 }
 
-/* NB: Modeled after ce_completed_send_next */
 QDF_STATUS
 ce_cancel_send_next(struct CE_handle *copyeng,
                     void **per_CE_contextp,
@@ -1401,6 +1446,24 @@ ce_cancel_send_next(struct CE_handle *copyeng,
                     unsigned int *nbytesp,
                     unsigned int *transfer_idp,
                     uint32_t *toeplitz_hash_result)
+{
+        struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
+
+        return hif_state->ce_services->ce_cancel_send_next
+                (copyeng, per_CE_contextp, per_transfer_contextp,
+                 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
+}
+
+/* NB: Modeled after ce_completed_send_next */
+QDF_STATUS
+ce_cancel_send_next_legacy(struct CE_handle *copyeng,
+                    void **per_CE_contextp,
+                    void **per_transfer_contextp,
+                    qdf_dma_addr_t *bufferp,
+                    unsigned int *nbytesp,
+                    unsigned int *transfer_idp,
+                    uint32_t *toeplitz_hash_result)
 {
         struct CE_state *CE_state;
         struct CE_ring_state *src_ring;
@@ -1476,13 +1539,17 @@ ce_completed_send_next(struct CE_handle *copyeng,
                        unsigned int *toeplitz_hash_result)
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct hif_softc *scn = CE_state->scn;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+        struct ce_ops *ce_services;
         int status;
 
+        ce_services = hif_state->ce_services;
         qdf_spin_lock_bh(&CE_state->ce_index_lock);
         status =
-            ce_completed_send_next_nolock(CE_state, per_CE_contextp,
-                                          per_transfer_contextp, bufferp,
-                                          nbytesp, transfer_idp, sw_idx,
+            ce_services->ce_completed_send_next_nolock(CE_state,
+                        per_CE_contextp, per_transfer_contextp,
+                        bufferp, nbytesp, transfer_idp, sw_idx,
                         hw_idx, toeplitz_hash_result);
         qdf_spin_unlock_bh(&CE_state->ce_index_lock);
 
@@ -1509,6 +1576,7 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
         unsigned int sw_idx, hw_idx;
         uint32_t toeplitz_hash_result;
         struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
         if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
                 return;
@@ -1533,10 +1601,11 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
 
         if (CE_state->send_cb) {
                 {
+                        struct ce_ops *ce_services = hif_state->ce_services;
                         /* Pop completed send buffers and call the
                          * registered send callback for each
                          */
-                        while (ce_completed_send_next_nolock
+                        while (ce_services->ce_completed_send_next_nolock
                                 (CE_state, &CE_context,
                                  &transfer_context, &buf,
                                  &nbytes, &id, &sw_idx, &hw_idx,
@@ -1768,7 +1837,7 @@ more_data:
                         CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
                                         HOST_IS_COPY_COMPLETE_MASK);
 
-                if (ce_recv_entries_done_nolock(scn, ce_state)) {
+                if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) {
                         if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
                                 goto more_data;
                         } else {
@@ -1799,13 +1868,13 @@ int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
 {
         struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
         uint32_t ctrl_addr = CE_state->ctrl_addr;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
         void *CE_context;
         void *transfer_context;
         qdf_dma_addr_t buf;
         unsigned int nbytes;
         unsigned int id;
         unsigned int flags;
-        uint32_t CE_int_status;
         unsigned int more_comp_cnt = 0;
         unsigned int more_snd_comp_cnt = 0;
         unsigned int sw_idx, hw_idx;
@@ -1844,7 +1913,7 @@ more_completions:
                 /* Pop completed recv buffers and call
                  * the registered recv callback for each
                  */
-                while (ce_completed_recv_next_nolock
+                while (hif_state->ce_services->ce_completed_recv_next_nolock
                                 (CE_state, &CE_context, &transfer_context,
                                  &buf, &nbytes, &id, &flags) ==
                                  QDF_STATUS_SUCCESS) {
@@ -1890,7 +1959,7 @@ more_completions:
                  */
 
 #ifdef ATH_11AC_TXCOMPACT
-                while (ce_completed_send_next_nolock
+                while (hif_state->ce_services->ce_completed_send_next_nolock
                         (CE_state, &CE_context,
                          &transfer_context, &buf, &nbytes,
                          &id, &sw_idx, &hw_idx,
@@ -1916,7 +1985,7 @@ more_completions:
                         }
                 }
 #else /* ATH_11AC_TXCOMPACT */
-                while (ce_completed_send_next_nolock
+                while (hif_state->ce_services->ce_completed_send_next_nolock
                         (CE_state, &CE_context,
                          &transfer_context, &buf, &nbytes,
                          &id, &sw_idx, &hw_idx,
@@ -1933,20 +2002,14 @@ more_completions:
 
 more_watermarks:
         if (CE_state->misc_cbs) {
-                CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
-                if (CE_int_status & CE_WATERMARK_MASK) {
-                        if (CE_state->watermark_cb) {
-                                qdf_spin_unlock(&CE_state->ce_index_lock);
-                                /* Convert HW IS bits to software flags */
-                                flags =
-                                        (CE_int_status & CE_WATERMARK_MASK) >>
-                                        CE_WM_SHFT;
-
-                                CE_state->
-                                watermark_cb((struct CE_handle *)CE_state,
-                                             CE_state->wm_context, flags);
-                                qdf_spin_lock(&CE_state->ce_index_lock);
-                        }
+                if (CE_state->watermark_cb &&
+                    hif_state->ce_services->watermark_int(CE_state, &flags)) {
+                        qdf_spin_unlock(&CE_state->ce_index_lock);
+                        /* Convert HW IS bits to software flags */
+                        CE_state->watermark_cb((struct CE_handle *)CE_state,
+                                        CE_state->wm_context, flags);
+                        qdf_spin_lock(&CE_state->ce_index_lock);
                 }
         }
 
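The open-coded status read and mask/shift conversion above move behind the new
watermark_int op (implemented for legacy targets by ce_check_int_watermark,
added at the end of this patch), so srng-based targets can report watermark
events their own way. A standalone sketch of the conversion, using made-up
mask and shift values -- the real CE_WATERMARK_MASK and CE_WM_SHFT come from
the target's register definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Assumed values for illustration only. */
#define FAKE_WATERMARK_MASK 0x00F0u
#define FAKE_WM_SHFT        4

/* Mirrors the shape of ce_check_int_watermark(): report whether any
 * watermark bit is set and, if so, hand the bits back as software flags. */
static bool fake_check_watermark(uint32_t int_status, unsigned int *flags)
{
        if (int_status & FAKE_WATERMARK_MASK) {
                *flags = (int_status & FAKE_WATERMARK_MASK) >> FAKE_WM_SHFT;
                return true;
        }
        return false;
}

int main(void)
{
        unsigned int flags = 0;

        assert(fake_check_watermark(0x0020, &flags) && flags == 0x2);
        assert(!fake_check_watermark(0x0001, &flags));
        return 0;
}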
@@ -1957,7 +2020,8 @@ more_watermarks:
          * more copy completions happened while the misc interrupts were being
          * handled.
          */
-        CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
+        if (!ce_srng_based(scn))
+                CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
                                 CE_WATERMARK_MASK |
                                 HOST_IS_COPY_COMPLETE_MASK);
 
@@ -1968,7 +2032,9 @@ more_watermarks:
          * misc interrupts. Go back and check again. Keep checking until
          * we find no more events to process.
          */
-        if (CE_state->recv_cb && ce_recv_entries_done_nolock(scn, CE_state)) {
+        if (CE_state->recv_cb &&
+            hif_state->ce_services->ce_recv_entries_done_nolock(scn,
+                                                                CE_state)) {
                 if (QDF_IS_EPPING_ENABLED(mode) ||
                     more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
                         goto more_completions;
@@ -1982,7 +2048,9 @@ more_watermarks:
                 }
         }
 
-        if (CE_state->send_cb && ce_send_entries_done_nolock(scn, CE_state)) {
+        if (CE_state->send_cb &&
+            hif_state->ce_services->ce_send_entries_done_nolock(scn,
+                                                                CE_state)) {
                 if (QDF_IS_EPPING_ENABLED(mode) ||
                     more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
                         goto more_completions;
@@ -1996,13 +2064,9 @@ more_watermarks:
                 }
         }
 
-        if (CE_state->misc_cbs) {
-                CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
-                if (CE_int_status & CE_WATERMARK_MASK) {
-                        if (CE_state->watermark_cb) {
-                                goto more_watermarks;
-                        }
-                }
+        if (CE_state->misc_cbs && CE_state->watermark_cb) {
+                if (hif_state->ce_services->watermark_int(CE_state, &flags))
+                        goto more_watermarks;
         }
 
         qdf_atomic_set(&CE_state->rx_pending, 0);
@@ -2065,7 +2129,7 @@ void ce_per_engine_service_any(int irq, struct hif_softc *scn)
  * Called with target_lock held.
  */
 static void
-ce_per_engine_handler_adjust(struct CE_state *CE_state,
+ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
                              int disable_copy_compl_intr)
 {
         uint32_t ctrl_addr = CE_state->ctrl_addr;
@@ -2166,6 +2230,8 @@ ce_send_cb_register(struct CE_handle *copyeng,
                     void *ce_send_context, int disable_interrupts)
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct hif_softc *scn = CE_state->scn;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
         if (CE_state == NULL) {
                 HIF_ERROR("%s: Error CE state = NULL", __func__);
@@ -2173,7 +2239,8 @@ ce_send_cb_register(struct CE_handle *copyeng,
         }
         CE_state->send_context = ce_send_context;
         CE_state->send_cb = fn_ptr;
-        ce_per_engine_handler_adjust(CE_state, disable_interrupts);
+        hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
+                        disable_interrupts);
 }
 
 /**
@@ -2194,6 +2261,8 @@ ce_recv_cb_register(struct CE_handle *copyeng,
                     void *CE_recv_context, int disable_interrupts)
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct hif_softc *scn = CE_state->scn;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
         if (CE_state == NULL) {
                 HIF_ERROR("%s: ERROR CE state = NULL", __func__);
@@ -2201,7 +2270,8 @@ ce_recv_cb_register(struct CE_handle *copyeng,
         }
         CE_state->recv_context = CE_recv_context;
         CE_state->recv_cb = fn_ptr;
-        ce_per_engine_handler_adjust(CE_state, disable_interrupts);
+        hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
+                        disable_interrupts);
 }
 
 /**
@@ -2217,10 +2287,13 @@ ce_watermark_cb_register(struct CE_handle *copyeng,
                          CE_watermark_cb fn_ptr, void *CE_wm_context)
 {
         struct CE_state *CE_state = (struct CE_state *)copyeng;
+        struct hif_softc *scn = CE_state->scn;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
         CE_state->watermark_cb = fn_ptr;
         CE_state->wm_context = CE_wm_context;
-        ce_per_engine_handler_adjust(CE_state, 0);
+        hif_state->ce_services->ce_per_engine_handler_adjust(CE_state, 0);
         if (fn_ptr) {
                 CE_state->misc_cbs = 1;
         }
@@ -2308,3 +2381,156 @@ void ce_ipa_get_resource(struct CE_handle *ce,
 }
 #endif /* IPA_OFFLOAD */
 
+bool ce_check_int_watermark(struct CE_state *CE_state, unsigned int *flags)
+{
+        uint32_t ce_int_status;
+        uint32_t ctrl_addr = CE_state->ctrl_addr;
+        struct hif_softc *scn = CE_state->scn;
+
+        ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
+        if (ce_int_status & CE_WATERMARK_MASK) {
+                /* Convert HW IS bits to software flags */
+                *flags = (ce_int_status & CE_WATERMARK_MASK) >> CE_WM_SHFT;
+                return true;
+        }
+
+        return false;
+}
+
+void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
+                        struct CE_ring_state *src_ring,
+                        struct CE_attr *attr)
+{
+        uint32_t ctrl_addr;
+        uint64_t dma_addr;
+
+        QDF_ASSERT(ce_id < scn->ce_count);
+        ctrl_addr = CE_BASE_ADDRESS(ce_id);
+
+        src_ring->hw_index =
+                CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+        src_ring->sw_index = src_ring->hw_index;
+        src_ring->write_index =
+                CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+        dma_addr = src_ring->base_addr_CE_space;
+        CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
+                        (uint32_t)(dma_addr & 0xFFFFFFFF));
+
+        /* if SR_BA_ADDRESS_HIGH register exists */
+        if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
+                uint32_t tmp;
+
+                tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
+                tmp &= ~0x1F;
+                dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
+                CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
+                                ctrl_addr, (uint32_t)dma_addr);
+        }
+        CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
+        CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
+#ifdef BIG_ENDIAN_HOST
+        /* Enable source ring byte swap for big endian host */
+        CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
+#endif
+        CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
+        CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
+}
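The BASE_ADDR_HIGH handling above splits a wider-than-32-bit ring base address
across two registers: the low 32 bits go into BASE_ADDR, and up to five more
bits (hence the & 0x1F) are merged into the low bits of the HIGH register
while its remaining contents are preserved. A self-contained sketch of that
arithmetic, with a sample address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* A sample DMA address with bit 36 set. */
        uint64_t dma_addr = 0x12345678ULL | (1ULL << 36);
        uint32_t base_lo, base_hi_field;

        /* Low 32 bits go into the BASE_ADDR register... */
        base_lo = (uint32_t)(dma_addr & 0xFFFFFFFF);
        /* ...and bits 36:32 into the low 5 bits of BASE_ADDR_HIGH
         * (the tmp &= ~0x1F read-modify-write above keeps the register's
         * other bits intact). */
        base_hi_field = (uint32_t)((dma_addr >> 32) & 0x1F);

        assert(base_lo == 0x12345678);
        assert(base_hi_field == 0x10);
        return 0;
}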
+
+void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
+                        struct CE_ring_state *dest_ring,
+                        struct CE_attr *attr)
+{
+        uint32_t ctrl_addr;
+        uint64_t dma_addr;
+
+        QDF_ASSERT(ce_id < scn->ce_count);
+        ctrl_addr = CE_BASE_ADDRESS(ce_id);
+        dest_ring->sw_index =
+                CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+        dest_ring->write_index =
+                CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+        dma_addr = dest_ring->base_addr_CE_space;
+        CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
+                        (uint32_t)(dma_addr & 0xFFFFFFFF));
+
+        /* if DR_BA_ADDRESS_HIGH exists */
+        if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
+                uint32_t tmp;
+
+                tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
+                tmp &= ~0x1F;
+                dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
+                CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
+                                ctrl_addr, (uint32_t)dma_addr);
+        }
+
+        CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
+#ifdef BIG_ENDIAN_HOST
+        /* Enable Dest ring byte swap for big endian host */
+        CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
+#endif
+        CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
+        CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
+}
+
+uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
+{
+        switch (ring_type) {
+        case CE_RING_SRC:
+                return sizeof(struct CE_src_desc);
+        case CE_RING_DEST:
+                return sizeof(struct CE_dest_desc);
+        case CE_RING_STATUS:
+                qdf_assert(0);
+                return 0;
+        default:
+                return 0;
+        }
+}
+
+void ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
+                uint32_t ce_id, struct CE_ring_state *ring,
+                struct CE_attr *attr)
+{
+        switch (ring_type) {
+        case CE_RING_SRC:
+                ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
+                break;
+        case CE_RING_DEST:
+                ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
+                break;
+        case CE_RING_STATUS:
+        default:
+                qdf_assert(0);
+                break;
+        }
+}
+
+struct ce_ops ce_service_legacy = {
+        .ce_get_desc_size = ce_get_desc_size_legacy,
+        .ce_ring_setup = ce_ring_setup_legacy,
+        .ce_sendlist_send = ce_sendlist_send_legacy,
+        .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
+        .ce_revoke_recv_next = ce_revoke_recv_next_legacy,
+        .ce_cancel_send_next = ce_cancel_send_next_legacy,
+        .ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
+        .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
+        .ce_send_nolock = ce_send_nolock_legacy,
+        .watermark_int = ce_check_int_watermark,
+        .ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
+        .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
+        .ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
+};
+
+struct ce_ops *ce_services_legacy(void)
+{
+        return &ce_service_legacy;
+}
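Nothing in this patch installs the table: the wrappers above assume attach-time
code elsewhere points hif_state->ce_services at ce_services_legacy() (or at an
srng-flavored table, given the ce_srng_based() check earlier in the patch). A
toy, self-contained sketch of that selection -- fake types throughout, and the
srng-side constructor is purely hypothetical here:

#include <stdbool.h>
#include <stdio.h>

struct fake_ops { const char *name; };

static struct fake_ops fake_legacy = { "legacy" };
static struct fake_ops fake_srng = { "srng" };

static struct fake_ops *fake_services_legacy(void) { return &fake_legacy; }
/* Hypothetical: no srng table appears in this patch. */
static struct fake_ops *fake_services_srng(void) { return &fake_srng; }

int main(void)
{
        bool srng_based = false;        /* stand-in for ce_srng_based(scn) */
        struct fake_ops *ce_services =
                srng_based ? fake_services_srng() : fake_services_legacy();

        printf("using %s CE services\n", ce_services->name);
        return 0;
}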