qcacmn: Fix issues in qdf aligned memory alloc function
1. Pass the alloc size as an input and output parameter, since the exact
   alloc_size is needed when freeing the memory.
2. Use qdf_align() to calculate the aligned physical address.
3. In the DBR component, pass the correct arguments to qdf_aligned_malloc()
   to fix the compilation errors, and pass the alloc size as an input and
   output parameter since the exact alloc_size is needed when freeing the
   DMA memory.

Change-Id: I83051b8aa54bbf3bb663902d8f17f2c3c55e57bf
CRs-Fixed: 2462441
commit ec527358c2
parent 0ebbf5a1de
committed by nshrivas
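Note (illustration only, not part of the commit): a minimal caller sketch of the updated qdf_aligned_malloc() contract, assuming the new prototypes shown in the hunks below. The wrapper function and variable names here are hypothetical. The size argument is now read as the requested size and written back with the size that was actually allocated; that returned size and the unaligned virtual address are what the caller must use when releasing the memory.

/* Hypothetical caller sketch; assumes qdf_mem.h with the updated API. */
static void *example_alloc_aligned_ring(uint32_t ring_size,
                                        uint32_t ring_align,
                                        uint32_t *alloc_size,
                                        void **vaddr_unaligned,
                                        qdf_dma_addr_t *paddr_unaligned,
                                        qdf_dma_addr_t *paddr_aligned)
{
        /* in: requested size; out: size that was actually allocated,
         * which may grow to ring_size + ring_align - 1 on the realloc path
         */
        *alloc_size = ring_size;

        /* returns the aligned virtual address; *vaddr_unaligned is what
         * must later be passed to qdf_mem_free()
         */
        return qdf_aligned_malloc(alloc_size, vaddr_unaligned,
                                  paddr_unaligned, paddr_aligned,
                                  ring_align);
}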
@@ -292,9 +292,11 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
 
 /**
  * qdf_aligned_malloc() - allocates aligned QDF memory.
- * @size: Number of bytes of memory to allocate.
- * @ring_base_align: Base address alignment.
+ * @size: Size to be allocated
  * @vaddr_unaligned: Unaligned virtual address.
+ * @paddr_unaligned: Unaligned physical address.
+ * @paddr_aligned: Aligned physical address.
+ * @align: Base address alignment.
  * @func: Function name of the call site.
  * @line: Line number of the call site.
  *
@@ -308,39 +310,44 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
  * memory. If this function is unable to allocate the amount of memory
  * specified (for any reason) it returns NULL.
  */
-#define qdf_aligned_malloc(size, ring_base_align, vaddr_unaligned) \
-	qdf_aligned_malloc_fl(size, ring_base_align, vaddr_unaligned, \
-			      __func__, __LINE__)
+#define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
+			   paddr_aligned, align) \
+	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
+			      paddr_aligned, align, __func__, __LINE__)
 
-void *qdf_aligned_malloc_fl(qdf_size_t size, uint32_t ring_base_align,
-			    void **vaddr_unaligned,
+void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
+			    qdf_dma_addr_t *paddr_unaligned,
+			    qdf_dma_addr_t *paddr_aligned,
+			    uint32_t align,
 			    const char *func, uint32_t line);
 
 /**
  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
  * @osdev: OS device handle
- * @dev: Pointer to device handle
  * @size: Size to be allocated
  * @vaddr_unaligned: Unaligned virtual address.
  * @paddr_unaligned: Unaligned physical address.
  * @paddr_aligned: Aligned physical address.
- * @ring_base_align: Base address alignment.
+ * @align: Base address alignment.
  * @func: Function name of the call site.
  * @line: Line number of the call site.
  *
  * Return: pointer of allocated memory or null if memory alloc fails.
  */
-#define qdf_aligned_mem_alloc_consistent(osdev, dev, size, vaddr_unaligned, \
-		paddr_unaligned, paddr_aligned, ring_base_align) \
-	qdf_aligned_mem_alloc_consistent_fl(osdev, dev, size, vaddr_unaligned, \
-					    paddr_unaligned, paddr_aligned, \
-					    ring_base_align, __func__, __LINE__)
+#define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
+					 paddr_unaligned, paddr_aligned, \
+					 align) \
+	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
+					    paddr_unaligned, paddr_aligned, \
+					    align, __func__, __LINE__)
 
-void *qdf_aligned_mem_alloc_consistent_fl(
-	qdf_device_t osdev, void *dev, qdf_size_t size,
-	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
-	qdf_dma_addr_t *paddr_aligned, uint32_t ring_base_align,
-	const char *func, uint32_t line);
+void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
+					  void **vaddr_unaligned,
+					  qdf_dma_addr_t *paddr_unaligned,
+					  qdf_dma_addr_t *paddr_aligned,
+					  uint32_t align, const char *func,
+					  uint32_t line);
+
 #define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
 
 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
@@ -1435,31 +1435,57 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
 qdf_export_symbol(qdf_mem_multi_pages_free);
 #endif
 
-void *qdf_aligned_malloc_fl(qdf_size_t size, uint32_t ring_base_align,
+void *qdf_aligned_malloc_fl(uint32_t *size,
 			    void **vaddr_unaligned,
+			    qdf_dma_addr_t *paddr_unaligned,
+			    qdf_dma_addr_t *paddr_aligned,
+			    uint32_t align,
 			    const char *func, uint32_t line)
 {
 	void *vaddr_aligned;
+	uint32_t align_alloc_size;
 
-	*vaddr_unaligned = qdf_mem_malloc_fl(size, func, line);
+	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
+					     line);
 	if (!*vaddr_unaligned) {
-		qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line);
+		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
 		return NULL;
 	}
 
-	if ((unsigned long)(*vaddr_unaligned) % ring_base_align) {
+	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
+
+	/* Re-allocate additional bytes to align base address only if
+	 * above allocation returns unaligned address. Reason for
+	 * trying exact size allocation above is, OS tries to allocate
+	 * blocks of size power-of-2 pages and then free extra pages.
+	 * e.g., of a ring size of 1MB, the allocation below will
+	 * request 1MB plus 7 bytes for alignment, which will cause a
+	 * 2MB block allocation,and that is failing sometimes due to
+	 * memory fragmentation.
+	 */
+	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
+		align_alloc_size = *size + align - 1;
+
 		qdf_mem_free(*vaddr_unaligned);
-		*vaddr_unaligned = qdf_mem_malloc_fl(size + ring_base_align - 1,
-						     func, line);
+		*vaddr_unaligned = qdf_mem_malloc_fl(
+				(qdf_size_t)align_alloc_size, func, line);
 		if (!*vaddr_unaligned) {
-			qdf_warn("Failed to alloc %zuB @ %s:%d",
-				 size, func, line);
+			qdf_warn("Failed to alloc %uB @ %s:%d",
+				 align_alloc_size, func, line);
 			return NULL;
 		}
+
+		*paddr_unaligned = qdf_mem_virt_to_phys(
+					*vaddr_unaligned);
+		*size = align_alloc_size;
 	}
 
-	vaddr_aligned = (*vaddr_unaligned) +
-		((unsigned long)(*vaddr_unaligned) % ring_base_align);
+	*paddr_aligned = (qdf_dma_addr_t)qdf_align
+		((unsigned long)(*paddr_unaligned), align);
+
+	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
+		((unsigned long)(*paddr_aligned) -
+		 (unsigned long)(*paddr_unaligned)));
 
 	return vaddr_aligned;
 }
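Note (illustration only, not part of the commit): the address math introduced above relies on qdf_align() rounding the physical address up to the next multiple of the power-of-two alignment, and then shifting the virtual address by the same delta. The helpers below are hypothetical stand-ins that spell out that arithmetic under that assumption.

/* Hypothetical helpers mirroring the computation above.
 * Example with paddr_unaligned = 0x1003 and align = 8:
 *   paddr_aligned = (0x1003 + 7) & ~7UL = 0x1008
 *   vaddr_aligned = vaddr_unaligned + (0x1008 - 0x1003)
 * so the aligned virtual and physical addresses refer to the same byte
 * inside the over-sized allocation.
 */
static inline unsigned long example_align_up(unsigned long addr,
                                             unsigned long align)
{
        return (addr + align - 1) & ~(align - 1);
}

static inline void *example_aligned_vaddr(void *vaddr_unaligned,
                                          unsigned long paddr_unaligned,
                                          unsigned long paddr_aligned)
{
        return (void *)((unsigned long)vaddr_unaligned +
                        (paddr_aligned - paddr_unaligned));
}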
@@ -1859,36 +1885,56 @@ qdf_export_symbol(qdf_mem_free_consistent);
 #endif /* MEMORY_DEBUG */
 
 void *qdf_aligned_mem_alloc_consistent_fl(
-	qdf_device_t osdev, void *dev, qdf_size_t size,
+	qdf_device_t osdev, uint32_t *size,
 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
-	qdf_dma_addr_t *paddr_aligned, uint32_t ring_base_align,
+	qdf_dma_addr_t *paddr_aligned, uint32_t align,
 	const char *func, uint32_t line)
 {
 	void *vaddr_aligned;
+	uint32_t align_alloc_size;
 
-	*vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev, size,
-						    paddr_unaligned);
+	*vaddr_unaligned = qdf_mem_alloc_consistent(
+			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
 	if (!*vaddr_unaligned) {
-		qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line);
+		qdf_warn("Failed to alloc %uB @ %s:%d",
+			 *size, func, line);
 		return NULL;
 	}
 
-	if ((unsigned long)(*vaddr_unaligned) % ring_base_align) {
-		qdf_mem_free_consistent(osdev, dev, size, *vaddr_unaligned,
-					*paddr_unaligned, 0);
-		*vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev,
-				size + ring_base_align - 1, paddr_unaligned);
+	/* Re-allocate additional bytes to align base address only if
+	 * above allocation returns unaligned address. Reason for
+	 * trying exact size allocation above is, OS tries to allocate
+	 * blocks of size power-of-2 pages and then free extra pages.
+	 * e.g., of a ring size of 1MB, the allocation below will
+	 * request 1MB plus 7 bytes for alignment, which will cause a
+	 * 2MB block allocation,and that is failing sometimes due to
+	 * memory fragmentation.
+	 */
+	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
+		align_alloc_size = *size + align - 1;
+
+		qdf_mem_free_consistent(osdev, osdev->dev, *size,
+					*vaddr_unaligned,
+					*paddr_unaligned, 0);
+
+		*vaddr_unaligned = qdf_mem_alloc_consistent(
+				osdev, osdev->dev, align_alloc_size,
+				paddr_unaligned);
 		if (!*vaddr_unaligned) {
-			qdf_warn("Failed to alloc %zuB @ %s:%d",
-				 size, func, line);
+			qdf_warn("Failed to alloc %uB @ %s:%d",
+				 align_alloc_size, func, line);
 			return NULL;
 		}
+
+		*size = align_alloc_size;
 	}
 
-	vaddr_aligned = *vaddr_unaligned +
-		((unsigned long)(*vaddr_unaligned) % ring_base_align);
-	*paddr_aligned = *paddr_unaligned + ((unsigned long)(vaddr_aligned) -
-		(unsigned long)(*vaddr_unaligned));
+	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
+			(unsigned long)(*paddr_unaligned), align);
+
+	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
+		((unsigned long)(*paddr_aligned) -
+		 (unsigned long)(*paddr_unaligned)));
 
 	return vaddr_aligned;
 }
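Note (illustration only, not part of the commit): because the DMA variant now reports the actual allocation size back through *size, a matching free has to use that reported size together with the unaligned virtual and physical addresses. The caller below is hypothetical; qdf_mem_free_consistent() is the existing QDF API already used in the hunk above.

/* Hypothetical teardown for memory obtained with
 * qdf_aligned_mem_alloc_consistent(): alloc_size, vaddr_unaligned and
 * paddr_unaligned must be the values reported by the allocator, not the
 * originally requested ring size.
 */
static void example_free_aligned_ring_consistent(qdf_device_t osdev,
                                                 uint32_t alloc_size,
                                                 void *vaddr_unaligned,
                                                 qdf_dma_addr_t paddr_unaligned)
{
        qdf_mem_free_consistent(osdev, osdev->dev, alloc_size,
                                vaddr_unaligned, paddr_unaligned, 0);
}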
@@ -371,7 +371,6 @@ static QDF_STATUS target_if_dbr_fill_ring(struct wlan_objmgr_pdev *pdev,
 					  struct direct_buf_rx_module_param *mod_param)
 {
 	uint32_t idx;
-	void *buf, *buf_aligned;
 	struct direct_buf_rx_ring_cfg *dbr_ring_cfg;
 	struct direct_buf_rx_ring_cap *dbr_ring_cap;
 	struct direct_buf_rx_buf_info *dbr_buf_pool;
@@ -384,23 +383,28 @@ static QDF_STATUS target_if_dbr_fill_ring(struct wlan_objmgr_pdev *pdev,
 	dbr_buf_pool = mod_param->dbr_buf_pool;
 
 	for (idx = 0; idx < dbr_ring_cfg->num_ptr - 1; idx++) {
-		buf_aligned = qdf_aligned_malloc(dbr_ring_cap->min_buf_size,
-						 dbr_ring_cap->min_buf_align,
-						 &buf);
-		if (!buf_aligned) {
-			direct_buf_rx_err(
-				"dir buf rx ring buf_aligned alloc failed");
+		void *buf_vaddr_unaligned, *buf_vaddr_aligned;
+		dma_addr_t buf_paddr_aligned, buf_paddr_unaligned;
+
+		buf_vaddr_aligned = qdf_aligned_malloc(
+			&dbr_ring_cap->min_buf_size, &buf_vaddr_unaligned,
+			&buf_paddr_unaligned, &buf_paddr_aligned,
+			dbr_ring_cap->min_buf_align);
+
+		if (!buf_vaddr_aligned) {
+			direct_buf_rx_err("dir buf rx ring alloc failed");
 			return QDF_STATUS_E_NOMEM;
 		}
-		dbr_buf_pool[idx].vaddr = buf;
-		dbr_buf_pool[idx].offset = buf_aligned - buf;
+		dbr_buf_pool[idx].vaddr = buf_vaddr_unaligned;
+		dbr_buf_pool[idx].offset = buf_vaddr_aligned -
+			buf_vaddr_unaligned;
 		dbr_buf_pool[idx].cookie = idx;
 		status = target_if_dbr_replenish_ring(pdev, mod_param,
-						      buf_aligned, idx);
+						      buf_vaddr_aligned, idx);
 		if (QDF_IS_STATUS_ERROR(status)) {
 			direct_buf_rx_err("replenish failed with status : %d",
 					  status);
-			qdf_mem_free(buf);
+			qdf_mem_free(buf_vaddr_unaligned);
 			return QDF_STATUS_E_FAILURE;
 		}
 	}
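Note (illustration only, not part of the commit): the DBR pool keeps the unaligned virtual address and the alignment offset, so the aligned buffer address handed to the ring can always be recomputed from those two stored fields. The helper below is hypothetical and only illustrates that relationship.

/* Hypothetical helper: recover the aligned buffer address for entry idx
 * from the vaddr/offset pair populated in target_if_dbr_fill_ring() above.
 */
static inline void *example_dbr_aligned_vaddr(
		struct direct_buf_rx_buf_info *dbr_buf_pool, uint32_t idx)
{
        return (uint8_t *)dbr_buf_pool[idx].vaddr + dbr_buf_pool[idx].offset;
}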