qcacmn: Add Cached Descriptor support for DP SRNGs

Add support to configure any HAL SRNG's descriptors to be allocated
from a cached memory area. This reduces the CPU cycles spent on
uncached memory accesses. Also add prefetching of cached descriptors.

Change-Id: I2544e8596d48e2f5549bf687a764c16d73397545
CRs-fixed: 2267945
committed by Nitesh Shrivastav
parent e8aab3b8ca
commit ab234e56f6
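
The pattern this change introduces: when a ring's descriptors live in
cacheable memory, the CPU must sync (invalidate) the cache region covering
a descriptor the hardware may have DMA-written before reading it, and a
prefetch then pulls the now-coherent data toward the CPU early. A minimal
standalone sketch of that pattern, using the same qdf_* wrappers and srng
fields that appear in the diff below (the helper name is hypothetical):

/* Hypothetical helper illustrating the sync-then-prefetch pattern
 * added by this commit; fields and QDF calls match the diff. */
static inline void hal_srng_warm_desc(struct hal_soc *soc,
				      struct hal_srng *srng,
				      uint32_t *desc)
{
	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
		return; /* uncached rings need no cache maintenance */

	/* Make the CPU view coherent with what hardware DMA-wrote */
	qdf_mem_dma_cache_sync(soc->qdf_dev,
			       qdf_mem_virt_to_phys(desc),
			       QDF_DMA_FROM_DEVICE,
			       srng->entry_size * sizeof(uint32_t));

	/* Start pulling the descriptor into the CPU cache early */
	qdf_prefetch(desc);
}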
@@ -312,6 +312,7 @@ enum hal_ring_type {
 #define HAL_SRNG_DATA_TLV_SWAP 0x00000020
 #define HAL_SRNG_LOW_THRES_INTR_ENABLE 0x00010000
 #define HAL_SRNG_MSI_INTR 0x00020000
+#define HAL_SRNG_CACHED_DESC 0x00040000
 
 #define PN_SIZE_24 0
 #define PN_SIZE_48 1
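
HAL_SRNG_CACHED_DESC takes the next free bit in the SRNG flags bitmask,
after HAL_SRNG_MSI_INTR. A setup-side sketch, assuming the usual pattern
of passing flags through the ring setup parameters (the parameter struct
below is a simplified stand-in, not the real setup structure):

/* Simplified stand-in for the ring setup parameters; the real
 * structure carries more fields. The new flag composes with the
 * existing bits exactly like HAL_SRNG_MSI_INTR would. */
struct ring_setup_params {
	uint32_t flags;
};

static void request_cached_descs(struct ring_setup_params *params)
{
	params->flags |= HAL_SRNG_CACHED_DESC;
}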
@@ -520,6 +521,26 @@ static inline bool hal_srng_initialized(void *hal_ring)
 	return !!srng->initialized;
 }
 
+/**
+ * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Destination ring pointer
+ *
+ * Caller takes responsibility for any locking needs.
+ *
+ * Return: Opaque pointer for next ring entry; NULL on failure
+ */
+static inline
+void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+
+	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
+
+	return NULL;
+}
+
 /**
  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
  * hal_srng_access_start if locked access is required
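
hal_srng_dst_peek is added here (it is a move; the matching removal
appears further down) so that the access-start path can locate the first
pending destination entry without consuming it. A usage sketch
(hypothetical wrapper; locking is the caller's responsibility per the
comment above):

/* Peek is side-effect free: tp does not advance, so repeated calls
 * return the same entry until hal_srng_dst_get_next() consumes it. */
static inline bool ring_has_pending_entry(void *hal_soc, void *hal_ring)
{
	return hal_srng_dst_peek(hal_soc, hal_ring) != NULL;
}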
@@ -532,14 +553,30 @@ static inline bool hal_srng_initialized(void *hal_ring)
 static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
 {
 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+	uint32_t *desc;
 
 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
 		srng->u.src_ring.cached_tp =
 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
-	else
+	else {
 		srng->u.dst_ring.cached_hp =
 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
 
+		if (srng->flags & HAL_SRNG_CACHED_DESC) {
+			desc = hal_srng_dst_peek(hal_soc, hal_ring);
+			if (qdf_likely(desc)) {
+				qdf_mem_dma_cache_sync(soc->qdf_dev,
+						       qdf_mem_virt_to_phys
+						       (desc),
+						       QDF_DMA_FROM_DEVICE,
+						       (srng->entry_size *
+							sizeof(uint32_t)));
+				qdf_prefetch(desc);
+			}
+		}
+	}
+
 	return 0;
 }
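
With that change, starting access on a destination ring snapshots the head
pointer and, for cached rings, immediately syncs and prefetches the first
pending descriptor, so it is already warm when the caller's reap loop
dereferences it. The maintenance span covers exactly one entry; entry_size
is in 32-bit words (the 32-word figure below is only an example):

/* Byte length of the synced region, as computed in the diff above.
 * E.g. a hypothetical 32-word descriptor gives 32 * 4 = 128 bytes,
 * i.e. two 64-byte cache lines. */
uint32_t desc_bytes = srng->entry_size * sizeof(uint32_t);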
@@ -577,7 +614,10 @@ static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
 static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
 {
 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
 	uint32_t *desc;
+	uint32_t *desc_next;
+	uint32_t tp;
 
 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
@@ -590,6 +630,17 @@ static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
 		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
 			srng->ring_size;
 
+		if (srng->flags & HAL_SRNG_CACHED_DESC) {
+			tp = srng->u.dst_ring.tp;
+			desc_next = &srng->ring_base_vaddr[tp];
+			qdf_mem_dma_cache_sync(soc->qdf_dev,
+					       qdf_mem_virt_to_phys(desc_next),
+					       QDF_DMA_FROM_DEVICE,
+					       (srng->entry_size *
+						sizeof(uint32_t)));
+			qdf_prefetch(desc_next);
+		}
+
 		return (void *)desc;
 	}
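
Each consume now warms its successor: after advancing tp, the next
descriptor is synced and prefetched while the caller is still processing
the current one, overlapping the memory latency with useful work. A sketch
of the resulting reap loop (the function name is hypothetical;
hal_srng_access_start and its hal_srng_access_end counterpart are the
locked access APIs from this header):

/* Hypothetical reap loop: with HAL_SRNG_CACHED_DESC set,
 * access_start warms entry 0 and each get_next warms entry N+1
 * while the loop body processes entry N. */
static void reap_dst_ring(void *hal_soc, void *hal_ring)
{
	uint32_t *ring_desc;

	if (hal_srng_access_start(hal_soc, hal_ring))
		return;

	while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring))) {
		/* process ring_desc; its cache lines were synced and
		 * prefetched by the previous iteration */
	}

	hal_srng_access_end(hal_soc, hal_ring);
}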
@@ -627,26 +678,6 @@ static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
 	return NULL;
 }
 
-/**
- * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
- * @hal_soc: Opaque HAL SOC handle
- * @hal_ring: Destination ring pointer
- *
- * Caller takes responsibility for any locking needs.
- *
- * Return: Opaque pointer for next ring entry; NULL on failure
- */
-static inline
-void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
-{
-	struct hal_srng *srng = (struct hal_srng *)hal_ring;
-
-	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
-		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
-
-	return NULL;
-}
-
 /**
  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
  * @hal_soc: Opaque HAL SOC handle