qcacld-3.0: Replace target_lock with ce_index_lock

target_lock was a global lock for all target and copy engine data
structure access. Replacing target_lock with a per-copy-engine lock
permits parallel completion handling for different copy engines.

Change-Id: I9c09d557c85f9e48beafe5e3f936e105183ddd3f
CRs-Fixed: 941355
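For readers skimming the diff below, here is a minimal sketch of the locking
model before and after this change. It is illustrative only: struct ce_state,
the POSIX spinlock calls, and the helper names are stand-ins invented for the
example, not the driver's cdf_spinlock_t / cdf_spin_lock_bh() API.

/*
 * Illustrative sketch only; stand-in types, not the driver's code.
 * Before: every copy engine (CE) operation took one global target_lock,
 * so two CPUs completing work on two different CEs still serialized.
 * After: each CE carries its own ce_index_lock, so only operations on
 * the SAME engine contend.
 */
#include <pthread.h>

struct ce_state {
	pthread_spinlock_t ce_index_lock; /* per-engine lock (new scheme) */
	unsigned int sw_index;            /* ring indices the lock guards */
	unsigned int write_index;
};

/* Old scheme: one lock shared by all copy engines
 * (would also need pthread_spin_init() before first use). */
static pthread_spinlock_t target_lock;

/* Old: advancing CE 0's ring blocks completion handling on CE 5 too. */
static void ce_ring_advance_old(struct ce_state *ce)
{
	pthread_spin_lock(&target_lock);
	ce->write_index++;
	pthread_spin_unlock(&target_lock);
}

/* New: only callers touching this particular engine contend. */
static void ce_ring_advance_new(struct ce_state *ce)
{
	pthread_spin_lock(&ce->ce_index_lock);
	ce->write_index++;
	pthread_spin_unlock(&ce->ce_index_lock);
}

/* Mirrors the diff: the per-CE lock is initialized when the engine is
 * brought up (ce_init), and the global init in hif_open goes away. */
static void ce_state_init(struct ce_state *ce)
{
	pthread_spin_init(&ce->ce_index_lock, PTHREAD_PROCESS_PRIVATE);
	ce->sw_index = 0;
	ce->write_index = 0;
}

Once this structure is in place the diff is largely mechanical: every
acquisition of the global target_lock around ring-index manipulation becomes
an acquisition of the owning engine's ce_index_lock.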
Author:    Houston Hoffman
Date:      2015-09-03 17:01:22 -07:00
Committer: Satish Singh
Parent:    15ec6c6895
Commit:    a499f30bd5

5 changed files with 47 additions and 50 deletions


@@ -290,7 +290,6 @@ struct ol_softc {
 	 * Guard changes to Target HW state and to software
 	 * structures that track hardware state.
	 */
-	cdf_spinlock_t target_lock;
	unsigned int ce_count; /* Number of Copy Engines supported */
	struct CE_state *ce_id_to_state[CE_COUNT_MAX]; /* CE id to CE_state */
 #ifdef FEATURE_NAPI


@@ -129,6 +129,8 @@ struct CE_state {
	struct CE_ring_state *src_ring;
	struct CE_ring_state *dest_ring;
	atomic_t rx_pending;
+	cdf_spinlock_t ce_index_lock;
	bool force_break;	/* Flag to indicate whether to
				 * break out the DPC context */


@@ -141,6 +141,7 @@ struct CE_handle *ce_init(struct ol_softc *scn,
		malloc_CE_state = true;
		cdf_mem_zero(CE_state, sizeof(*CE_state));
		scn->ce_id_to_state[CE_id] = CE_state;
+		cdf_spinlock_init(&CE_state->ce_index_lock);
		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;


@@ -261,10 +261,10 @@ ce_send(struct CE_handle *copyeng,
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

-	cdf_spin_lock_bh(&CE_state->scn->target_lock);
+	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
			transfer_id, flags, user_flag);
-	cdf_spin_unlock_bh(&CE_state->scn->target_lock);
+	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
 }
@@ -322,7 +322,7 @@ ce_sendlist_send(struct CE_handle *copyeng,
	CDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

-	cdf_spin_lock_bh(&CE_state->scn->target_lock);
+	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
@@ -367,7 +367,7 @@ ce_sendlist_send(struct CE_handle *copyeng,
		 * the entire request at once, punt it back to the caller.
		 */
	}
-	cdf_spin_unlock_bh(&CE_state->scn->target_lock);
+	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
 }
@@ -427,12 +427,7 @@ int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
	uint64_t dma_addr;
	uint32_t user_flags = 0;

-	/*
-	 * This lock could be more fine-grained, one per CE,
-	 * TODO : Add this lock now.
-	 * That is the next step of optimization.
-	 */
-	cdf_spin_lock_bh(&scn->target_lock);
+	cdf_spin_lock_bh(&ce_state->ce_index_lock);

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
@@ -524,7 +519,7 @@ int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
		war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
	}

-	cdf_spin_unlock_bh(&scn->target_lock);
+	cdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/*
	 * If all packets in the array are transmitted,
@@ -551,13 +546,13 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
	uint64_t dma_addr = buffer;
	struct ol_softc *scn = CE_state->scn;

-	cdf_spin_lock_bh(&scn->target_lock);
+	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
	if (val == -1) {
-		cdf_spin_unlock_bh(&scn->target_lock);
+		cdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}
@@ -589,11 +584,11 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
	}

	A_TARGET_ACCESS_END_RET_EXT(scn, val);
	if (val == -1) {
-		cdf_spin_unlock_bh(&scn->target_lock);
+		cdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return val;
	}

-	cdf_spin_unlock_bh(&scn->target_lock);
+	cdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
 }
@@ -634,10 +629,10 @@ unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
	unsigned int sw_index;
	unsigned int write_index;

-	cdf_spin_lock(&CE_state->scn->target_lock);
+	cdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
-	cdf_spin_unlock(&CE_state->scn->target_lock);
+	cdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 }
@@ -650,10 +645,10 @@ unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
	unsigned int sw_index;
	unsigned int write_index;

-	cdf_spin_lock(&CE_state->scn->target_lock);
+	cdf_spin_lock(&CE_state->ce_index_lock);
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
-	cdf_spin_unlock(&CE_state->scn->target_lock);
+	cdf_spin_unlock(&CE_state->ce_index_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 }
@@ -683,9 +678,9 @@ unsigned int ce_send_entries_done(struct CE_handle *copyeng)
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

-	cdf_spin_lock(&CE_state->scn->target_lock);
+	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
-	cdf_spin_unlock(&CE_state->scn->target_lock);
+	cdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
 }
@@ -715,9 +710,9 @@ unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int nentries;

-	cdf_spin_lock(&CE_state->scn->target_lock);
+	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
-	cdf_spin_unlock(&CE_state->scn->target_lock);
+	cdf_spin_unlock(&CE_state->ce_index_lock);

	return nentries;
 }
@@ -807,12 +802,12 @@ ce_completed_recv_next(struct CE_handle *copyeng,
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

-	cdf_spin_lock_bh(&CE_state->scn->target_lock);
+	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, flagsp);
-	cdf_spin_unlock_bh(&CE_state->scn->target_lock);
+	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
 }
@@ -838,7 +833,7 @@ ce_revoke_recv_next(struct CE_handle *copyeng,
	}
	scn = CE_state->scn;

-	cdf_spin_lock(&scn->target_lock);
+	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
@@ -870,7 +865,7 @@ ce_revoke_recv_next(struct CE_handle *copyeng,
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
-	cdf_spin_unlock(&scn->target_lock);
+	cdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
 }
@@ -984,7 +979,7 @@ ce_cancel_send_next(struct CE_handle *copyeng,
	}
	scn = CE_state->scn;

-	cdf_spin_lock(&CE_state->scn->target_lock);
+	cdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;
@@ -1023,7 +1018,7 @@ ce_cancel_send_next(struct CE_handle *copyeng,
	} else {
		status = CDF_STATUS_E_FAILURE;
	}
-	cdf_spin_unlock(&CE_state->scn->target_lock);
+	cdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
 }
@@ -1045,13 +1040,13 @@ ce_completed_send_next(struct CE_handle *copyeng,
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	int status;

-	cdf_spin_lock_bh(&CE_state->scn->target_lock);
+	cdf_spin_lock_bh(&CE_state->ce_index_lock);
	status =
		ce_completed_send_next_nolock(CE_state, per_CE_contextp,
					      per_transfer_contextp, bufferp,
					      nbytesp, transfer_idp, sw_idx,
					      hw_idx, toeplitz_hash_result);
-	cdf_spin_unlock_bh(&CE_state->scn->target_lock);
+	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
 }
@@ -1092,7 +1087,7 @@ void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id)
	 * addressed by change spin_lock to spin_lock_bh also.
	 */

-	cdf_spin_lock_bh(&scn->target_lock);
+	cdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
@@ -1106,14 +1101,16 @@ void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id)
					    &toeplitz_hash_result) ==
					    CDF_STATUS_SUCCESS) {
				if (CE_id != CE_HTT_H2T_MSG) {
-					cdf_spin_unlock_bh(&scn->target_lock);
-					CE_state->
-					send_cb((struct CE_handle *)
+					cdf_spin_unlock_bh(
+						&CE_state->ce_index_lock);
+					CE_state->send_cb(
+						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
-					cdf_spin_lock_bh(&scn->target_lock);
+					cdf_spin_lock_bh(
+						&CE_state->ce_index_lock);
				} else {
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
@@ -1129,7 +1126,7 @@ void ce_per_engine_servicereap(struct ol_softc *scn, unsigned int CE_id)
		}
	}

-	cdf_spin_unlock_bh(&scn->target_lock);
+	cdf_spin_unlock_bh(&CE_state->ce_index_lock);

	A_TARGET_ACCESS_END(scn);
 }
@@ -1175,7 +1172,7 @@ int ce_per_engine_service(struct ol_softc *scn, unsigned int CE_id)
		return 0; /* no work done */
	}

-	cdf_spin_lock(&scn->target_lock);
+	cdf_spin_lock(&CE_state->ce_index_lock);

	/* Clear force_break flag and re-initialize receive_count to 0 */
@@ -1192,7 +1189,7 @@ more_completions:
		       (CE_state, &CE_context, &transfer_context,
			&buf, &nbytes, &id, &flags) ==
		       CDF_STATUS_SUCCESS) {
-			cdf_spin_unlock(&scn->target_lock);
+			cdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->recv_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, flags);
@@ -1223,7 +1220,7 @@ more_completions:
					  CE_state->receive_count);
				return CE_state->receive_count;
			}
-			cdf_spin_lock(&scn->target_lock);
+			cdf_spin_lock(&CE_state->ce_index_lock);
		}
	}
@@ -1247,12 +1244,12 @@ more_completions:
			if (CE_id != CE_HTT_H2T_MSG ||
			    WLAN_IS_EPPING_ENABLED(cds_get_conparam())) {
-				cdf_spin_unlock(&scn->target_lock);
+				cdf_spin_unlock(&CE_state->ce_index_lock);
				CE_state->send_cb((struct CE_handle *)CE_state,
						  CE_context, transfer_context,
						  buf, nbytes, id, sw_idx,
						  hw_idx, toeplitz_hash_result);
-				cdf_spin_lock(&scn->target_lock);
+				cdf_spin_lock(&CE_state->ce_index_lock);
			} else {
				struct HIF_CE_pipe_info *pipe_info =
					(struct HIF_CE_pipe_info *)CE_context;
@@ -1270,12 +1267,12 @@ more_completions:
			 &transfer_context, &buf, &nbytes,
			 &id, &sw_idx, &hw_idx,
			 &toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
-			cdf_spin_unlock(&scn->target_lock);
+			cdf_spin_unlock(&CE_state->ce_index_lock);
			CE_state->send_cb((struct CE_handle *)CE_state,
					  CE_context, transfer_context, buf,
					  nbytes, id, sw_idx, hw_idx,
					  toeplitz_hash_result);
-			cdf_spin_lock(&scn->target_lock);
+			cdf_spin_lock(&CE_state->ce_index_lock);
		}
 #endif /*ATH_11AC_TXCOMPACT */
	}
@@ -1285,8 +1282,7 @@ more_watermarks:
		CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
		if (CE_int_status & CE_WATERMARK_MASK) {
			if (CE_state->watermark_cb) {
-				cdf_spin_unlock(&scn->target_lock);
+				cdf_spin_unlock(&CE_state->ce_index_lock);
				/* Convert HW IS bits to software flags */
				flags =
					(CE_int_status & CE_WATERMARK_MASK) >>
@@ -1295,7 +1291,7 @@ more_watermarks:
				CE_state->
				watermark_cb((struct CE_handle *)CE_state,
					     CE_state->wm_context, flags);
-				cdf_spin_lock(&scn->target_lock);
+				cdf_spin_lock(&CE_state->ce_index_lock);
			}
		}
	}
@@ -1355,7 +1351,7 @@ more_watermarks:
		}
	}

-	cdf_spin_unlock(&scn->target_lock);
+	cdf_spin_unlock(&CE_state->ce_index_lock);
	cdf_atomic_set(&CE_state->rx_pending, 0);

	if (Q_TARGET_ACCESS_END(scn) < 0)
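One recurring shape in the completion paths above is worth calling out: the
CE lock is dropped before invoking send_cb / recv_cb / watermark_cb and
re-taken afterwards, because a callback may re-enter the CE layer (to re-post
a receive buffer, for instance) and must not deadlock on the non-recursive
spinlock. A hedged sketch of that pattern, again using hypothetical stand-in
names and POSIX spinlocks rather than the driver's API:

#include <pthread.h>
#include <stdbool.h>

struct ce_state {
	pthread_spinlock_t ce_index_lock; /* assumed already initialized
					   * with pthread_spin_init() */
	unsigned int sw_index;    /* last descriptor software has reaped */
	unsigned int write_index; /* last descriptor hardware completed */
	void (*recv_cb)(struct ce_state *ce); /* upper-layer completion */
};

/* Completed descriptors remain while the indices differ. */
static bool ce_completion_pending(const struct ce_state *ce)
{
	return ce->sw_index != ce->write_index;
}

static void ce_service_one_engine(struct ce_state *ce)
{
	pthread_spin_lock(&ce->ce_index_lock);
	while (ce_completion_pending(ce)) {
		ce->sw_index++; /* reap one descriptor under the lock */
		/*
		 * Drop the lock across the callback: it may call back
		 * into this CE (e.g. to re-post a buffer) and must not
		 * self-deadlock on the non-recursive spinlock.
		 */
		pthread_spin_unlock(&ce->ce_index_lock);
		ce->recv_cb(ce);
		pthread_spin_lock(&ce->ce_index_lock);
	}
	pthread_spin_unlock(&ce->ce_index_lock);
}

Because the lock is dropped across the callback, another context may service
the same ring in that window; this is why the loop re-evaluates the
completion condition after re-acquiring the lock rather than caching it.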


@@ -535,7 +535,6 @@ CDF_STATUS hif_open(void)
	cdf_atomic_init(&scn->link_suspended);
	cdf_atomic_init(&scn->tasklet_from_intr);
	init_waitqueue_head(&scn->aps_osdev.event_queue);
-	cdf_spinlock_init(&scn->target_lock);
	scn->linkstate_vote = 0;
	return status;
 }