qcacmn: Replace A_TARGET_ACCESS_BEGIN/END_RET_PTR
Macros should not alter the execution of the function bodies in which they are
expanded. Replace the A_TARGET_ACCESS_BEGIN/END_RET_PTR and _RET_EXT macros
with explicit Q_TARGET_ACCESS_BEGIN/END checks, and fix the possible memory
leaks that this cleanup exposed.

Change-Id: I546c5822d7c28e0c9dd77094a5bb0f7e3e7544d4
CRs-Fixed: 986480
Committed by: Gerrit - the friendly Code Review server
Parent: 987ab445de
Commit: 4411ad4d0c
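For context before the hunks: the macros being removed expanded to statements containing a hidden "return NULL" (see the header hunk near the end of this diff), so a reader of ce_init() could not see every exit path, and anything allocated before the macro invocation was silently leaked on failure. The snippet below is a minimal, self-contained sketch of that anti-pattern and of the explicit-check-plus-goto shape this change switches to; fake_target_access_begin(), make-believe demo_* functions and the malloc'd handle are illustrative stand-ins, not driver APIs.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for Q_TARGET_ACCESS_BEGIN(); returns < 0 on failure. */
static int fake_target_access_begin(int should_fail)
{
	return should_fail ? -1 : 0;
}

/* Anti-pattern: the macro hides a 'return NULL' inside the caller's body. */
#define ACCESS_BEGIN_RET_PTR(fail) \
	do { \
		if (fake_target_access_begin(fail) < 0) \
			return NULL; \
	} while (0)

static void *demo_hidden_return(int fail)
{
	void *handle = malloc(64);	/* allocated before the macro */

	if (!handle)
		return NULL;
	ACCESS_BEGIN_RET_PTR(fail);	/* may return NULL and leak 'handle' */
	return handle;
}

/* Replacement shape used by this change: the exit path is explicit and
 * routed through a cleanup label, so nothing allocated earlier leaks. */
static void *demo_explicit_check(int fail)
{
	void *handle = malloc(64);

	if (!handle)
		return NULL;
	if (fake_target_access_begin(fail) < 0)
		goto error_target_access;
	return handle;

error_target_access:
	free(handle);
	return NULL;
}

int main(void)
{
	printf("hidden return:  %p (leaks on failure)\n", demo_hidden_return(1));
	printf("explicit check: %p (cleaned up on failure)\n", demo_explicit_check(1));
	return 0;
}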
@@ -202,13 +202,16 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 		ptr += sizeof(struct CE_ring_state);
 		src_ring->nentries = nentries;
 		src_ring->nentries_mask = nentries - 1;
-		A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+			goto error_target_access;
 		src_ring->hw_index =
 			CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
 		src_ring->sw_index = src_ring->hw_index;
 		src_ring->write_index =
 			CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
-		A_TARGET_ACCESS_END_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_END(scn) < 0)
+			goto error_target_access;
+
 		src_ring->low_water_mark_nentries = 0;
 		src_ring->high_water_mark_nentries = nentries;
 		src_ring->per_transfer_context = (void **)ptr;
@@ -270,7 +273,8 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 				CE_DESC_RING_ALIGN - 1) &
 				~(CE_DESC_RING_ALIGN - 1));
 
-		A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+			goto error_target_access;
 		dma_addr = src_ring->base_addr_CE_space;
 		CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
 			(uint32_t)(dma_addr & 0xFFFFFFFF));
@@ -293,7 +297,8 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 #endif
 		CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
 		CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
-		A_TARGET_ACCESS_END_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_END(scn) < 0)
+			goto error_target_access;
 		}
 	}
 
@@ -339,12 +344,15 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 		ptr += sizeof(struct CE_ring_state);
 		dest_ring->nentries = nentries;
 		dest_ring->nentries_mask = nentries - 1;
-		A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+			goto error_target_access;
 		dest_ring->sw_index =
 			CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
 		dest_ring->write_index =
 			CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
-		A_TARGET_ACCESS_END_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_END(scn) < 0)
+			goto error_target_access;
+
 		dest_ring->low_water_mark_nentries = 0;
 		dest_ring->high_water_mark_nentries = nentries;
 		dest_ring->per_transfer_context = (void **)ptr;
@@ -398,7 +406,8 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 				base_addr_owner_space_unaligned;
 		}
 
-		A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+			goto error_target_access;
 		dma_addr = dest_ring->base_addr_CE_space;
 		CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
 			(uint32_t)(dma_addr & 0xFFFFFFFF));
@@ -420,7 +429,8 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 #endif
 		CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
 		CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
-		A_TARGET_ACCESS_END_RET_PTR(scn);
+		if (Q_TARGET_ACCESS_END(scn) < 0)
+			goto error_target_access;
 
 		/* epping */
 		/* poll timer */
@@ -438,12 +448,15 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 	}
 
 	/* Enable CE error interrupts */
-	A_TARGET_ACCESS_BEGIN_RET_PTR(scn);
+	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+		goto error_target_access;
 	CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
-	A_TARGET_ACCESS_END_RET_PTR(scn);
+	if (Q_TARGET_ACCESS_END(scn) < 0)
+		goto error_target_access;
 
 	return (struct CE_handle *)CE_state;
 
+error_target_access:
 error_no_dma_mem:
 	ce_fini((struct CE_handle *)CE_state);
 	return NULL;
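The ce_init() hunks above bracket every CE register update between Q_TARGET_ACCESS_BEGIN() and Q_TARGET_ACCESS_END(). This diff does not show how those functions are implemented; the sketch below is only a simplified model, assuming they behave like a reference-counted keep-awake pair that can fail, with all toy_* names invented for illustration.

#include <stdio.h>

/* Toy model of a target that must be "awake" while registers are touched. */
struct toy_target {
	int wake_refcount;
	int broken;		/* simulate a dead bus / failed wakeup */
};

/* Model of Q_TARGET_ACCESS_BEGIN(): take a wake reference, < 0 on failure. */
static int toy_access_begin(struct toy_target *t)
{
	if (t->broken)
		return -1;
	t->wake_refcount++;
	return 0;
}

/* Model of Q_TARGET_ACCESS_END(): drop the reference taken by begin. */
static int toy_access_end(struct toy_target *t)
{
	if (t->wake_refcount <= 0)
		return -1;	/* unbalanced end */
	t->wake_refcount--;
	return 0;
}

/* Caller shape used by ce_init() after this change. */
static int toy_program_ring(struct toy_target *t)
{
	if (toy_access_begin(t) < 0)
		goto error_target_access;
	/* ... CE_*_RING_*_SET(...) register writes would go here ... */
	if (toy_access_end(t) < 0)
		goto error_target_access;
	return 0;

error_target_access:
	return -1;
}

int main(void)
{
	struct toy_target ok = { 0, 0 }, dead = { 0, 1 };

	printf("healthy target: %d\n", toy_program_ring(&ok));
	printf("dead target:    %d\n", toy_program_ring(&dead));
	return 0;
}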
@@ -652,6 +652,14 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
 }
 #endif /* WLAN_FEATURE_FASTPATH */
 
+/**
+ * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
+ * @copyeng: copy engine handle
+ * @per_recv_context: virtual address of the nbuf
+ * @buffer: physical address of the nbuf
+ *
+ * Return: 0 if the buffer is enqueued
+ */
 int
 ce_recv_buf_enqueue(struct CE_handle *copyeng,
 		    void *per_recv_context, qdf_dma_addr_t buffer)
@@ -663,7 +671,6 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
 	unsigned int nentries_mask = dest_ring->nentries_mask;
 	unsigned int write_index;
 	unsigned int sw_index;
-	int val = 0;
 	uint64_t dma_addr = buffer;
 	struct hif_softc *scn = CE_state->scn;
 
@@ -671,10 +678,9 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
 	write_index = dest_ring->write_index;
 	sw_index = dest_ring->sw_index;
 
-	A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
-	if (val == -1) {
+	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
-		return val;
+		return -EIO;
 	}
 
 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
@@ -707,14 +713,8 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
 	} else {
 		status = QDF_STATUS_E_FAILURE;
 	}
-	A_TARGET_ACCESS_END_RET_EXT(scn, val);
-	if (val == -1) {
-		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
-		return val;
-	}
 
+	Q_TARGET_ACCESS_END(scn);
 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
 
 	return status;
 }
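The ce_recv_buf_enqueue() hunks keep ce_index_lock held across the target-access section and release it on every exit path, returning -EIO when the access cannot be started. Below is a minimal sketch of that unlock-on-every-path shape, using a pthread mutex as a stand-in for qdf_spin_lock_bh/qdf_spin_unlock_bh and a hypothetical try_access_begin() in place of Q_TARGET_ACCESS_BEGIN().

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for Q_TARGET_ACCESS_BEGIN(); < 0 means failure. */
static int try_access_begin(int should_fail)
{
	return should_fail ? -1 : 0;
}

/* Shape of ce_recv_buf_enqueue() after the change: every return path drops
 * the lock it took, and failures report -EIO instead of a sentinel value
 * propagated through a macro-managed local. */
static int enqueue_recv_buf(int access_fails)
{
	int status;

	pthread_mutex_lock(&index_lock);

	if (try_access_begin(access_fails) < 0) {
		pthread_mutex_unlock(&index_lock);
		return -EIO;
	}

	/* ... write the descriptor and advance write_index here ... */
	status = 0;

	/* the Q_TARGET_ACCESS_END() counterpart would be called here */
	pthread_mutex_unlock(&index_lock);
	return status;
}

int main(void)
{
	printf("normal enqueue:       %d\n", enqueue_recv_buf(0));
	printf("target access failed: %d\n", enqueue_recv_buf(1));
	return 0;
}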
@@ -72,53 +72,6 @@
 /* AXI gating when L1, L2 to reduce power consumption */
 #define CONFIG_PCIE_ENABLE_AXI_CLK_GATE 0
 
-#if CONFIG_ATH_PCIE_MAX_PERF
-
-#define A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val) \
-	do {struct hif_softc *unused = scn; \
-		unused = unused; } while (0)
-
-#define A_TARGET_ACCESS_BEGIN_RET_PTR(scn) \
-	do {struct hif_softc *unused = scn; \
-		unused = unused; } while (0)
-
-#define A_TARGET_ACCESS_END_RET_EXT(scn, val) \
-	do {struct hif_softc *unused = scn; \
-		unused = unused; } while (0)
-
-#define A_TARGET_ACCESS_END_RET_PTR(scn) \
-	do {struct hif_softc *unused = scn; \
-		unused = unused; } while (0)
-
-#else /* CONFIG_ATH_PCIE_MAX_PERF */
-
-
-#define A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val) \
-	do { \
-		if (!WLAN_IS_EPPING_ENABLED(hif_get_conparam(scn)) && \
-		    Q_TARGET_ACCESS_BEGIN(scn) < 0) \
-			val = -1; \
-	} while (0)
-
-#define A_TARGET_ACCESS_BEGIN_RET_PTR(scn) \
-	do { \
-		if (Q_TARGET_ACCESS_BEGIN(scn) < 0) \
-			return NULL; \
-	} while (0)
-
-#define A_TARGET_ACCESS_END_RET_EXT(scn, val) \
-	do { \
-		if (Q_TARGET_ACCESS_END(scn) < 0) \
-			val = -1; \
-	} while (0)
-
-#define A_TARGET_ACCESS_END_RET_PTR(scn) \
-	do { \
-		if (Q_TARGET_ACCESS_END(scn) < 0) \
-			return NULL; \
-	} while (0)
-#endif /* CONFIG_ATH_PCIE_MAX_PERF */
 
 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
 
 /**
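With the _RET_EXT variants gone from this header as well, callers no longer have to declare a sentinel local and test it after the macro, which is exactly what the "int val = 0; ... if (val == -1)" code removed from ce_recv_buf_enqueue() was doing. A small before/after sketch of that caller shape follows, assuming only what the removed macro bodies above show; check_begin() is a hypothetical stand-in for Q_TARGET_ACCESS_BEGIN().

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for Q_TARGET_ACCESS_BEGIN(); < 0 on failure. */
static int check_begin(int should_fail)
{
	return should_fail ? -1 : 0;
}

/* Old shape: the macro smuggled its result out through a caller-owned
 * local, which every caller had to declare, initialize and re-check. */
#define ACCESS_BEGIN_RET_EXT(fail, val) \
	do { \
		if (check_begin(fail) < 0) \
			(val) = -1; \
	} while (0)

static int old_style_caller(int fail)
{
	int val = 0;

	ACCESS_BEGIN_RET_EXT(fail, val);
	if (val == -1)
		return val;
	return 0;
}

/* New shape: call the function directly and test its return value. */
static int new_style_caller(int fail)
{
	if (check_begin(fail) < 0)
		return -EIO;	/* matches the driver's new error code */
	return 0;
}

int main(void)
{
	printf("old style on failure: %d\n", old_style_caller(1));
	printf("new style on failure: %d\n", new_style_caller(1));
	return 0;
}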
@@ -47,11 +47,6 @@
  * Force 0 and consider moving corresponding code into
  * pci specific files
  */
-#define A_TARGET_ACCESS_BEGIN_RET_PTR(scn)
-#define A_TARGET_ACCESS_END_RET_PTR(scn)
-#define A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val)
-#define A_TARGET_ACCESS_END_RET_EXT(scn, val)
-
 #define ADRASTEA_CE_INTR_ENABLES 0x002F00A8
 #define ADRASTEA_CE_INTR_ENABLES_SET "COMING IN REGISTER SET36"
 #define ADRASTEA_CE_INTR_ENABLES_CLEAR "COMING IN REGISTER SET36"