|
@@ -1435,31 +1435,57 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
|
|
|
qdf_export_symbol(qdf_mem_multi_pages_free);
|
|
|
#endif
|
|
|
|
|
|
-void *qdf_aligned_malloc_fl(qdf_size_t size, uint32_t ring_base_align,
|
|
|
+void *qdf_aligned_malloc_fl(uint32_t *size,
|
|
|
void **vaddr_unaligned,
|
|
|
+ qdf_dma_addr_t *paddr_unaligned,
|
|
|
+ qdf_dma_addr_t *paddr_aligned,
|
|
|
+ uint32_t align,
|
|
|
const char *func, uint32_t line)
|
|
|
{
|
|
|
void *vaddr_aligned;
|
|
|
+ uint32_t align_alloc_size;
|
|
|
|
|
|
- *vaddr_unaligned = qdf_mem_malloc_fl(size, func, line);
|
|
|
+ *vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
|
|
|
+ line);
|
|
|
if (!*vaddr_unaligned) {
|
|
|
- qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line);
|
|
|
+ qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
|
- if ((unsigned long)(*vaddr_unaligned) % ring_base_align) {
|
|
|
+ *paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
|
|
|
+
|
|
|
+ /* Re-allocate additional bytes to align base address only if
|
|
|
+ * above allocation returns unaligned address. Reason for
|
|
|
+ * trying exact size allocation above is, OS tries to allocate
|
|
|
+ * blocks of size power-of-2 pages and then free extra pages.
|
|
|
+	 * e.g., for a ring size of 1MB, the allocation below will
|
|
|
+ * request 1MB plus 7 bytes for alignment, which will cause a
|
|
|
+	 * 2MB block allocation, and that is failing sometimes due to
|
|
|
+ * memory fragmentation.
|
|
|
+ */
|
|
|
+ if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
|
|
|
+ align_alloc_size = *size + align - 1;
|
|
|
+
|
|
|
qdf_mem_free(*vaddr_unaligned);
|
|
|
- *vaddr_unaligned = qdf_mem_malloc_fl(size + ring_base_align - 1,
|
|
|
- func, line);
|
|
|
+ *vaddr_unaligned = qdf_mem_malloc_fl(
|
|
|
+ (qdf_size_t)align_alloc_size, func, line);
|
|
|
if (!*vaddr_unaligned) {
|
|
|
- qdf_warn("Failed to alloc %zuB @ %s:%d",
|
|
|
- size, func, line);
|
|
|
+ qdf_warn("Failed to alloc %uB @ %s:%d",
|
|
|
+ align_alloc_size, func, line);
|
|
|
return NULL;
|
|
|
}
|
|
|
+
|
|
|
+ *paddr_unaligned = qdf_mem_virt_to_phys(
|
|
|
+ *vaddr_unaligned);
|
|
|
+ *size = align_alloc_size;
|
|
|
}
|
|
|
|
|
|
- vaddr_aligned = (*vaddr_unaligned) +
|
|
|
- ((unsigned long)(*vaddr_unaligned) % ring_base_align);
|
|
|
+ *paddr_aligned = (qdf_dma_addr_t)qdf_align
|
|
|
+ ((unsigned long)(*paddr_unaligned), align);
|
|
|
+
|
|
|
+ vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
|
|
|
+ ((unsigned long)(*paddr_aligned) -
|
|
|
+ (unsigned long)(*paddr_unaligned)));
|
|
|
|
|
|
return vaddr_aligned;
|
|
|
}
|
|
@@ -1859,36 +1885,56 @@ qdf_export_symbol(qdf_mem_free_consistent);
|
|
|
#endif /* MEMORY_DEBUG */
|
|
|
|
|
|
void *qdf_aligned_mem_alloc_consistent_fl(
|
|
|
- qdf_device_t osdev, void *dev, qdf_size_t size,
|
|
|
+ qdf_device_t osdev, uint32_t *size,
|
|
|
void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
|
|
|
- qdf_dma_addr_t *paddr_aligned, uint32_t ring_base_align,
|
|
|
+ qdf_dma_addr_t *paddr_aligned, uint32_t align,
|
|
|
const char *func, uint32_t line)
|
|
|
{
|
|
|
void *vaddr_aligned;
|
|
|
+ uint32_t align_alloc_size;
|
|
|
|
|
|
- *vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev, size,
|
|
|
- paddr_unaligned);
|
|
|
+ *vaddr_unaligned = qdf_mem_alloc_consistent(
|
|
|
+ osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
|
|
|
if (!*vaddr_unaligned) {
|
|
|
- qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line);
|
|
|
+ qdf_warn("Failed to alloc %uB @ %s:%d",
|
|
|
+ *size, func, line);
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
|
- if ((unsigned long)(*vaddr_unaligned) % ring_base_align) {
|
|
|
- qdf_mem_free_consistent(osdev, dev, size, *vaddr_unaligned,
|
|
|
+ /* Re-allocate additional bytes to align base address only if
|
|
|
+ * above allocation returns unaligned address. Reason for
|
|
|
+ * trying exact size allocation above is, OS tries to allocate
|
|
|
+ * blocks of size power-of-2 pages and then free extra pages.
|
|
|
+	 * e.g., for a ring size of 1MB, the allocation below will
|
|
|
+ * request 1MB plus 7 bytes for alignment, which will cause a
|
|
|
+	 * 2MB block allocation, and that is failing sometimes due to
|
|
|
+ * memory fragmentation.
|
|
|
+ */
|
|
|
+ if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
|
|
|
+ align_alloc_size = *size + align - 1;
|
|
|
+
|
|
|
+ qdf_mem_free_consistent(osdev, osdev->dev, *size,
|
|
|
+ *vaddr_unaligned,
|
|
|
*paddr_unaligned, 0);
|
|
|
- *vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev,
|
|
|
- size + ring_base_align - 1, paddr_unaligned);
|
|
|
+
|
|
|
+ *vaddr_unaligned = qdf_mem_alloc_consistent(
|
|
|
+ osdev, osdev->dev, align_alloc_size,
|
|
|
+ paddr_unaligned);
|
|
|
if (!*vaddr_unaligned) {
|
|
|
- qdf_warn("Failed to alloc %zuB @ %s:%d",
|
|
|
- size, func, line);
|
|
|
+ qdf_warn("Failed to alloc %uB @ %s:%d",
|
|
|
+ align_alloc_size, func, line);
|
|
|
return NULL;
|
|
|
}
|
|
|
+
|
|
|
+ *size = align_alloc_size;
|
|
|
}
|
|
|
|
|
|
- vaddr_aligned = *vaddr_unaligned +
|
|
|
- ((unsigned long)(*vaddr_unaligned) % ring_base_align);
|
|
|
- *paddr_aligned = *paddr_unaligned + ((unsigned long)(vaddr_aligned) -
|
|
|
- (unsigned long)(*vaddr_unaligned));
|
|
|
+ *paddr_aligned = (qdf_dma_addr_t)qdf_align(
|
|
|
+ (unsigned long)(*paddr_unaligned), align);
|
|
|
+
|
|
|
+ vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
|
|
|
+ ((unsigned long)(*paddr_aligned) -
|
|
|
+ (unsigned long)(*paddr_unaligned)));
|
|
|
|
|
|
return vaddr_aligned;
|
|
|
}
|