Merge "qcacmn: Break up DMA mem alloc for HW desc banks in multi pages"

This commit is contained in:
Linux Build Service Account
2020-02-18 18:08:17 -08:00
committed by Gerrit - the friendly Code Review server
8 changed files with 193 additions and 212 deletions

View File

@@ -2054,4 +2054,34 @@ void dp_is_hw_dbs_enable(struct dp_soc *soc,
 #if defined(WLAN_SUPPORT_RX_FISA)
 void dp_rx_dump_fisa_table(struct dp_soc *soc);
 #endif /* WLAN_SUPPORT_RX_FISA */
+
+#ifdef MAX_ALLOC_PAGE_SIZE
+/**
+ * dp_set_max_page_size() - Set the max page size for hw link desc.
+ * For MCL the page size is set to the OS-defined value and for WIN
+ * it is set to the max_alloc_size cfg ini param. This is to ensure
+ * that WIN gets contiguous memory allocations as per requirement.
+ * @pages: link desc page handle
+ * @max_alloc_size: max allocation size from the cfg ini param
+ *
+ * Return: None
+ */
+static inline
+void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
+			  uint32_t max_alloc_size)
+{
+	pages->page_size = qdf_page_size;
+}
+#else
+static inline
+void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
+			  uint32_t max_alloc_size)
+{
+	pages->page_size = max_alloc_size;
+}
+#endif /* MAX_ALLOC_PAGE_SIZE */
 #endif /* #ifndef _DP_INTERNAL_H_ */
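A minimal sketch of how this choice plays out downstream (the 4KB and 2MB figures are assumptions taken from the cookie comment added in dp_types.h below, not part of the function itself):

	struct qdf_mem_multi_page_t pages = { 0 };

	dp_set_max_page_size(&pages, 0x200000 /* e.g. 2MB from the cfg ini */);
	/* MCL (MAX_ALLOC_PAGE_SIZE defined): pages.page_size == qdf_page_size,
	 * i.e. the 4KB OS page, so each DMA allocation stays page-sized.
	 * WIN: pages.page_size == 0x200000, so each "page" is one large
	 * contiguous chunk, preserving the contiguity requirement.
	 */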

View File

@@ -2098,12 +2098,16 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
 	uint32_t total_link_descs, total_mem_size;
 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
-	uint32_t num_link_desc_banks;
-	uint32_t last_bank_size = 0;
 	uint32_t entry_size, num_entries;
 	int i;
-	uint32_t desc_id = 0;
+	uint32_t cookie = 0;
 	qdf_dma_addr_t *baseaddr = NULL;
+	uint32_t page_idx = 0;
+	struct qdf_mem_multi_page_t *pages;
+	struct qdf_mem_dma_page_t *dma_pages;
+	uint32_t offset = 0;
+	uint32_t count = 0;
+	uint32_t num_descs_per_page;
 
 	/* Only Tx queue descriptors are allocated from common link descriptor
 	 * pool Rx queue descriptors are not included in this because (REO queue
@@ -2138,98 +2142,28 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
 	total_mem_size += link_desc_align;
 
-	if (total_mem_size <= max_alloc_size) {
-		num_link_desc_banks = 0;
-		last_bank_size = total_mem_size;
-	} else {
-		num_link_desc_banks = (total_mem_size) /
-			(max_alloc_size - link_desc_align);
-		last_bank_size = total_mem_size %
-			(max_alloc_size - link_desc_align);
-	}
-
 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
-		  FL("total_mem_size: %d, num_link_desc_banks: %u"),
-		  total_mem_size, num_link_desc_banks);
+		  FL("total_mem_size: %d"), total_mem_size);
 
-	for (i = 0; i < num_link_desc_banks; i++) {
-		if (!dp_is_soc_reinit(soc)) {
-			baseaddr = &soc->link_desc_banks[i].
-					base_paddr_unaligned;
-			soc->link_desc_banks[i].base_vaddr_unaligned =
-				qdf_mem_alloc_consistent(soc->osdev,
-							 soc->osdev->dev,
-							 max_alloc_size,
-							 baseaddr);
-		}
-		soc->link_desc_banks[i].size = max_alloc_size;
-		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
-			soc->link_desc_banks[i].base_vaddr_unaligned) +
-			((unsigned long)(
-			soc->link_desc_banks[i].base_vaddr_unaligned) %
-			link_desc_align));
-		soc->link_desc_banks[i].base_paddr = (unsigned long)(
-			soc->link_desc_banks[i].base_paddr_unaligned) +
-			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
-			 (unsigned long)(
-			 soc->link_desc_banks[i].base_vaddr_unaligned));
-		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-				  FL("Link descriptor memory alloc failed"));
-			goto fail;
-		}
-		if (!dp_is_soc_reinit(soc)) {
-			qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
-					 soc->link_desc_banks[i].size,
-					 "link_desc_bank");
-		}
-		qdf_minidump_log((soc->link_desc_banks[i].base_vaddr),
-				 soc->link_desc_banks[i].size,
-				 "link_desc_bank");
-	}
-
-	if (last_bank_size) {
-		/* Allocate last bank in case total memory required is not exact
-		 * multiple of max_alloc_size
-		 */
-		if (!dp_is_soc_reinit(soc)) {
-			baseaddr = &soc->link_desc_banks[i].
-					base_paddr_unaligned;
-			soc->link_desc_banks[i].base_vaddr_unaligned =
-				qdf_mem_alloc_consistent(soc->osdev,
-							 soc->osdev->dev,
-							 last_bank_size,
-							 baseaddr);
-		}
-		soc->link_desc_banks[i].size = last_bank_size;
-		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
-			(soc->link_desc_banks[i].base_vaddr_unaligned) +
-			((unsigned long)(
-			soc->link_desc_banks[i].base_vaddr_unaligned) %
-			link_desc_align));
-		soc->link_desc_banks[i].base_paddr =
-			(unsigned long)(
-			soc->link_desc_banks[i].base_paddr_unaligned) +
-			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
-			 (unsigned long)(
-			 soc->link_desc_banks[i].base_vaddr_unaligned));
-		if (!dp_is_soc_reinit(soc)) {
-			qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
-					 soc->link_desc_banks[i].size,
-					 "link_desc_bank");
-		}
-		qdf_minidump_log((soc->link_desc_banks[i].base_vaddr),
-				 soc->link_desc_banks[i].size,
-				 "link_desc_bank");
-	}
+	pages = &soc->link_desc_pages;
+	dp_set_max_page_size(pages, max_alloc_size);
+	if (!dp_is_soc_reinit(soc)) {
+		qdf_mem_multi_pages_alloc(soc->osdev,
+					  pages,
+					  link_desc_size,
+					  total_link_descs,
+					  0, false);
+		if (!pages->num_pages) {
+			dp_err("Multi page alloc fail for hw link desc pool");
+			goto fail_page_alloc;
+		}
+	}
+	qdf_minidump_log(
+		(void *)(pages->dma_pages->page_v_addr_start),
+		pages->num_pages *
+		sizeof(struct qdf_mem_dma_page_t),
+		"hw_link_desc_bank");
 
 	/* Allocate and setup link descriptor idle list for HW internal use */
 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
 	total_mem_size = entry_size * total_link_descs;
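For sizing, qdf_mem_multi_pages_alloc() carves each page into element_size slots; a sketch of the arithmetic with illustrative numbers from the dp_types.h cookie comment (the round-up of num_pages is assumed, it happens inside the allocator and is not shown in this hunk):

	uint32_t element_size = 128;   /* link_desc_size */
	uint32_t element_num  = 1024;  /* total_link_descs, assumed */
	uint32_t page_size    = 4096;  /* qdf_page_size on MCL */

	uint32_t per_page = page_size / element_size;           /* 32 descs/page */
	uint32_t n_pages  = element_num / per_page +
			    ((element_num % per_page) ? 1 : 0); /* 32 pages */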
@@ -2250,27 +2184,27 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
 	hal_srng_access_start_unlocked(soc->hal_soc,
 				       soc->wbm_idle_link_ring.hal_srng);
-	for (i = 0; i < MAX_LINK_DESC_BANKS &&
-	     soc->link_desc_banks[i].base_paddr; i++) {
-		uint32_t num_entries = (soc->link_desc_banks[i].size -
-			((unsigned long)(
-			soc->link_desc_banks[i].base_vaddr) -
-			(unsigned long)(
-			soc->link_desc_banks[i].base_vaddr_unaligned)))
-			/ link_desc_size;
-		unsigned long paddr = (unsigned long)(
-			soc->link_desc_banks[i].base_paddr);
-
-		while (num_entries && (desc = hal_srng_src_get_next(
-			soc->hal_soc,
-			soc->wbm_idle_link_ring.hal_srng))) {
-			hal_set_link_desc_addr(desc,
-				LINK_DESC_COOKIE(desc_id, i), paddr);
-			num_entries--;
-			desc_id++;
-			paddr += link_desc_size;
-		}
-	}
+	page_idx = 0; count = 0;
+	offset = 0;
+	pages = &soc->link_desc_pages;
+	if (pages->dma_pages)
+		dma_pages = pages->dma_pages;
+	else
+		goto fail;
+	num_descs_per_page =
+		pages->num_element_per_page;
+	while ((desc = hal_srng_src_get_next(
+			soc->hal_soc,
+			soc->wbm_idle_link_ring.hal_srng)) &&
+	       (count < total_link_descs)) {
+		page_idx = count / num_descs_per_page;
+		offset = count % num_descs_per_page;
+		cookie = LINK_DESC_COOKIE(count, page_idx);
+		hal_set_link_desc_addr(
+				desc, cookie,
+				dma_pages[page_idx].page_p_addr
+				+ (offset * link_desc_size));
+		count++;
+	}
 	hal_srng_access_end_unlocked(soc->hal_soc,
 				     soc->wbm_idle_link_ring.hal_srng);
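The rewritten loop derives the page and the offset within it from one running count; a worked example with the MCL figure of 32 descriptors per 4KB page (assumed from the dp_types.h comment):

	count = 70;
	page_idx = count / num_descs_per_page;  /* 70 / 32 = 2 */
	offset   = count % num_descs_per_page;  /* 70 % 32 = 6 */
	/* address programmed into the WBM idle-link descriptor:
	 * dma_pages[2].page_p_addr + 6 * link_desc_size
	 */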
@@ -2322,40 +2256,38 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
 		scatter_buf_ptr = (uint8_t *)(
 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
 		rem_entries = num_entries_per_buf;
-		for (i = 0; i < MAX_LINK_DESC_BANKS &&
-		     soc->link_desc_banks[i].base_paddr; i++) {
-			uint32_t num_link_descs =
-				(soc->link_desc_banks[i].size -
-				((unsigned long)(
-				soc->link_desc_banks[i].base_vaddr) -
-				(unsigned long)(
-				soc->link_desc_banks[i].base_vaddr_unaligned)))
-				/ link_desc_size;
-			unsigned long paddr = (unsigned long)(
-				soc->link_desc_banks[i].base_paddr);
-
-			while (num_link_descs) {
-				hal_set_link_desc_addr((void *)scatter_buf_ptr,
-					LINK_DESC_COOKIE(desc_id, i), paddr);
-				num_link_descs--;
-				desc_id++;
-				paddr += link_desc_size;
-				rem_entries--;
-				if (rem_entries) {
-					scatter_buf_ptr += entry_size;
-				} else {
-					rem_entries = num_entries_per_buf;
-					scatter_buf_num++;
-					if (scatter_buf_num >= num_scatter_bufs)
-						break;
-					scatter_buf_ptr = (uint8_t *)(
-						soc->wbm_idle_scatter_buf_base_vaddr[
-						scatter_buf_num]);
-				}
-			}
-		}
+		pages = &soc->link_desc_pages;
+		page_idx = 0; count = 0;
+		offset = 0;
+		num_descs_per_page =
+			pages->num_element_per_page;
+		if (pages->dma_pages)
+			dma_pages = pages->dma_pages;
+		else
+			goto fail;
+		while (count < total_link_descs) {
+			page_idx = count / num_descs_per_page;
+			offset = count % num_descs_per_page;
+			cookie = LINK_DESC_COOKIE(count, page_idx);
+			hal_set_link_desc_addr(
+				(void *)scatter_buf_ptr,
+				cookie,
+				dma_pages[page_idx].page_p_addr +
+				(offset * link_desc_size));
+			rem_entries--;
+			if (rem_entries) {
+				scatter_buf_ptr += entry_size;
+			} else {
+				rem_entries = num_entries_per_buf;
+				scatter_buf_num++;
+				if (scatter_buf_num >= num_scatter_bufs)
+					break;
+				scatter_buf_ptr =
+					(uint8_t *)
+					(soc->wbm_idle_scatter_buf_base_vaddr[
+					scatter_buf_num]);
+			}
+			count++;
+		}
 
 		/* Setup link descriptor idle list in HW */
 		hal_setup_link_idle_list(soc->hal_soc,
@@ -2384,17 +2316,15 @@ fail:
 		}
 	}
-	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
-		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
-			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
-				soc->link_desc_banks[i].size,
-				soc->link_desc_banks[i].base_vaddr_unaligned,
-				soc->link_desc_banks[i].base_paddr_unaligned,
-				0);
-			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
-		}
-	}
+	pages = &soc->link_desc_pages;
+	qdf_minidump_remove(
+		(void *)pages->dma_pages->page_v_addr_start);
+	qdf_mem_multi_pages_free(soc->osdev,
+				 pages, 0, false);
 	return QDF_STATUS_E_FAILURE;
+fail_page_alloc:
+	return QDF_STATUS_E_FAULT;
 }
 /*
@@ -2403,6 +2333,7 @@ fail:
 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
 {
 	int i;
+	struct qdf_mem_multi_page_t *pages;
 
 	if (soc->wbm_idle_link_ring.hal_srng) {
 		qdf_minidump_remove(
@@ -2421,17 +2352,11 @@ static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
 		}
 	}
-	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
-		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
-			qdf_minidump_remove(soc->link_desc_banks[i].base_vaddr);
-			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
-				soc->link_desc_banks[i].size,
-				soc->link_desc_banks[i].base_vaddr_unaligned,
-				soc->link_desc_banks[i].base_paddr_unaligned,
-				0);
-			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
-		}
-	}
+	pages = &soc->link_desc_pages;
+	qdf_minidump_remove(
+		(void *)pages->dma_pages->page_v_addr_start);
+	qdf_mem_multi_pages_free(soc->osdev,
+				 pages, 0, false);
 }
 
 #ifdef IPA_OFFLOAD

View File

@@ -688,8 +688,9 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
  *				   the MSDU Link Descriptor
  * @soc: core txrx main context
- * @buf_info: buf_info include cookie that used to lookup virtual address of
- *	link descriptor Normally this is just an index into a per SOC array.
+ * @buf_info: buf_info includes cookie that is used to lookup
+ *	virtual address of link descriptor after deriving the page id
+ *	and the offset or index of the desc on the associated page.
  *
  * This is the VA of the link descriptor, that HAL layer later uses to
  * retrieve the list of MSDU's for a given MPDU.
@@ -701,16 +702,16 @@ void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
 				  struct hal_buf_info *buf_info)
 {
 	void *link_desc_va;
-	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);
-
-	/* TODO */
-	/* Add sanity for cookie */
-	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
-		(buf_info->paddr -
-		 soc->link_desc_banks[bank_id].base_paddr);
+	struct qdf_mem_multi_page_t *pages;
+	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
+
+	pages = &soc->link_desc_pages;
+	if (!pages)
+		return NULL;
+	if (qdf_unlikely(page_id >= pages->num_pages))
+		return NULL;
+	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
+		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
 	return link_desc_va;
 }
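Because each page is a single physically contiguous DMA allocation, the physical offset of the descriptor within its page doubles as the virtual offset; a condensed sketch of the lookup (same names as the function above):

	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
	struct qdf_mem_dma_page_t *page =
			&soc->link_desc_pages.dma_pages[page_id];
	void *va = page->page_v_addr_start +
		   (buf_info->paddr - page->page_p_addr);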

View File

@@ -85,7 +85,6 @@
 #define MAX_VDEV_CNT 51
 #endif
 
-#define MAX_LINK_DESC_BANKS 8
 #define MAX_TXDESC_POOLS 4
 #define MAX_RXDESC_POOLS 4
 #define MAX_REO_DEST_RINGS 4
@@ -961,14 +960,8 @@ struct dp_soc {
 	/* Device ID coming from Bus sub-system */
 	uint32_t device_id;
 
-	/* Link descriptor memory banks */
-	struct {
-		void *base_vaddr_unaligned;
-		void *base_vaddr;
-		qdf_dma_addr_t base_paddr_unaligned;
-		qdf_dma_addr_t base_paddr;
-		uint32_t size;
-	} link_desc_banks[MAX_LINK_DESC_BANKS];
+	/* Link descriptor pages */
+	struct qdf_mem_multi_page_t link_desc_pages;
 
 	/* Link descriptor Idle list for HW internal use (SRNG mode) */
 	struct dp_srng wbm_idle_link_ring;
@@ -1280,20 +1273,36 @@ struct dp_ipa_resources {
 #define DP_NAC_MAX_CLIENT  24
 
 /*
- * Macros to setup link descriptor cookies - for link descriptors, we just
- * need first 3 bits to store bank ID. The remaining bytes will be used set a
- * unique ID, which will be useful in debugging
+ * 24 bits cookie size
+ * 10 bits page id 0 ~ 1023 for MCL
+ * 3 bits page id 0 ~ 7 for WIN
+ * WBM Idle List Desc size = 128,
+ * Num descs per page = 4096/128 = 32 for MCL
+ * Num descs per page = 2MB/128 = 16384 for WIN
  */
-#define LINK_DESC_BANK_ID_MASK 0x7
-#define LINK_DESC_ID_SHIFT 3
+/*
+ * Macros to setup link descriptor cookies - for link descriptors, we just
+ * need first 3 bits to store bank/page ID for WIN. The
+ * remaining bytes will be used to set a unique ID, which will
+ * be useful in debugging
+ */
+#ifdef MAX_ALLOC_PAGE_SIZE
+#define LINK_DESC_PAGE_ID_MASK	0x007FE0
+#define LINK_DESC_ID_SHIFT	5
+#define LINK_DESC_COOKIE(_desc_id, _page_id) \
+	((((_page_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_desc_id))
+#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
+	(((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
+#else
+#define LINK_DESC_PAGE_ID_MASK	0x7
+#define LINK_DESC_ID_SHIFT	3
+#define LINK_DESC_COOKIE(_desc_id, _page_id) \
+	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_page_id))
+#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
+	((_cookie) & LINK_DESC_PAGE_ID_MASK)
+#endif
 #define LINK_DESC_ID_START 0x8000
-
-#define LINK_DESC_COOKIE(_desc_id, _bank_id) \
-	((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_bank_id))
-
-#define LINK_DESC_COOKIE_BANK_ID(_cookie) \
-	((_cookie) & LINK_DESC_BANK_ID_MASK)
 
 /* same as ieee80211_nac_param */
 enum dp_nac_param_cmd {
 	/* IEEE80211_NAC_PARAM_ADD */
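A worked round trip through the #else (WIN) branch, with desc_id = 70 and page_id = 2 chosen arbitrarily:

	/* encode: ((70 + 0x8000) << 3) | 2  ==  0x40232                    */
	/* decode: LINK_DESC_COOKIE_PAGE_ID(0x40232) == 0x40232 & 0x7 == 2 */

The page id survives the round trip in the low LINK_DESC_ID_SHIFT bits, while the upper bits carry the unique, monotonically assigned descriptor id used for debugging.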

View File

@@ -39,6 +39,7 @@
  * Return: aligned value.
  */
 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
+#define qdf_page_size __page_size
 
 /**
  * struct qdf_mem_dma_page_t - Allocated dmaable page
@@ -64,6 +65,7 @@ struct qdf_mem_multi_page_t {
 	uint16_t num_pages;
 	struct qdf_mem_dma_page_t *dma_pages;
 	void **cacheable_pages;
+	qdf_size_t page_size;
 };
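A minimal usage sketch of the extended struct (osdev and the sizes are placeholders): leaving page_size at zero keeps the historical PAGE_SIZE behaviour, because the allocator falls back to qdf_page_size; setting it beforehand, as dp_set_max_page_size() does, overrides the page granularity.

	struct qdf_mem_multi_page_t pages = { 0 };

	pages.page_size = 2 * 1024 * 1024;        /* optional: 2MB pages */
	qdf_mem_multi_pages_alloc(osdev, &pages, 128 /* element_size */,
				  1024 /* element_num */, 0, false);
	/* ... use pages.dma_pages[i].page_p_addr / page_v_addr_start ... */
	qdf_mem_multi_pages_free(osdev, &pages, 0, false);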

View File

@@ -98,6 +98,7 @@ typedef struct __qdf_mempool_ctxt {
 #endif /* __KERNEL__ */
 
+#define __page_size ((size_t)PAGE_SIZE)
 #define __qdf_align(a, mask) ALIGN(a, mask)
 
 #ifdef DISABLE_MEMDEBUG_PANIC

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -42,7 +42,5 @@
 #define __alloc_size(ptr) ksize(ptr)
 
-#define __page_size ((size_t)PAGE_SIZE)
-
 #endif /* __I_QDF_TALLOC_H */

View File

@@ -1343,10 +1343,13 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
 	void **cacheable_pages = NULL;
 	uint16_t i;
 
-	pages->num_element_per_page = PAGE_SIZE / element_size;
+	if (!pages->page_size)
+		pages->page_size = qdf_page_size;
+
+	pages->num_element_per_page = pages->page_size / element_size;
 	if (!pages->num_element_per_page) {
 		qdf_print("Invalid page %d or element size %d",
-			  (int)PAGE_SIZE, (int)element_size);
+			  (int)pages->page_size, (int)element_size);
 		goto out_fail;
 	}
@@ -1365,7 +1368,7 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
 		cacheable_pages = pages->cacheable_pages;
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
-					PAGE_SIZE, func, line, caller, 0);
+					pages->page_size, func, line, caller, 0);
 			if (!cacheable_pages[page_idx])
 				goto page_alloc_fail;
 		}
@@ -1381,7 +1384,7 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
 			dma_pages->page_v_addr_start =
 				qdf_mem_alloc_consistent_debug(
-					osdev, osdev->dev, PAGE_SIZE,
+					osdev, osdev->dev, pages->page_size,
 					&dma_pages->page_p_addr,
 					func, line, caller);
 			if (!dma_pages->page_v_addr_start) {
@@ -1390,7 +1393,7 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
 				goto page_alloc_fail;
 			}
 			dma_pages->page_v_addr_end =
-				dma_pages->page_v_addr_start + PAGE_SIZE;
+				dma_pages->page_v_addr_start + pages->page_size;
 			dma_pages++;
 		}
 		pages->cacheable_pages = NULL;
@@ -1408,7 +1411,7 @@ page_alloc_fail:
 		for (i = 0; i < page_idx; i++) {
 			qdf_mem_free_consistent_debug(
 				osdev, osdev->dev,
-				PAGE_SIZE, dma_pages->page_v_addr_start,
+				pages->page_size, dma_pages->page_v_addr_start,
 				dma_pages->page_p_addr, memctxt, func, line);
 			dma_pages++;
 		}
@@ -1444,6 +1447,9 @@ void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
 	unsigned int page_idx;
 	struct qdf_mem_dma_page_t *dma_pages;
 
+	if (!pages->page_size)
+		pages->page_size = qdf_page_size;
+
 	if (cacheable) {
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
@@ -1453,7 +1459,7 @@ void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
 		dma_pages = pages->dma_pages;
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
 			qdf_mem_free_consistent_debug(
-				osdev, osdev->dev, PAGE_SIZE,
+				osdev, osdev->dev, pages->page_size,
 				dma_pages->page_v_addr_start,
 				dma_pages->page_p_addr, memctxt, func, line);
 			dma_pages++;
@@ -1520,10 +1526,13 @@ void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
 	void **cacheable_pages = NULL;
 	uint16_t i;
 
-	pages->num_element_per_page = PAGE_SIZE / element_size;
+	if (!pages->page_size)
+		pages->page_size = qdf_page_size;
+
+	pages->num_element_per_page = pages->page_size / element_size;
 	if (!pages->num_element_per_page) {
 		qdf_print("Invalid page %d or element size %d",
-			  (int)PAGE_SIZE, (int)element_size);
+			  (int)pages->page_size, (int)element_size);
 		goto out_fail;
 	}
@@ -1540,7 +1549,8 @@
 		cacheable_pages = pages->cacheable_pages;
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
-			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
+			cacheable_pages[page_idx] =
+				qdf_mem_malloc(pages->page_size);
 			if (!cacheable_pages[page_idx])
 				goto page_alloc_fail;
 		}
@@ -1555,7 +1565,7 @@
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
 			dma_pages->page_v_addr_start =
 				qdf_mem_alloc_consistent(osdev, osdev->dev,
-							 PAGE_SIZE,
+							 pages->page_size,
 							 &dma_pages->page_p_addr);
 			if (!dma_pages->page_v_addr_start) {
 				qdf_print("dmaable page alloc fail pi %d",
@@ -1563,7 +1573,7 @@
 				goto page_alloc_fail;
 			}
 			dma_pages->page_v_addr_end =
-				dma_pages->page_v_addr_start + PAGE_SIZE;
+				dma_pages->page_v_addr_start + pages->page_size;
 			dma_pages++;
 		}
 		pages->cacheable_pages = NULL;
@@ -1578,7 +1588,8 @@ page_alloc_fail:
 	} else {
 		dma_pages = pages->dma_pages;
 		for (i = 0; i < page_idx; i++) {
-			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+			qdf_mem_free_consistent(
+				osdev, osdev->dev, pages->page_size,
 				dma_pages->page_v_addr_start,
 				dma_pages->page_p_addr, memctxt);
 			dma_pages++;
@@ -1612,6 +1623,9 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
 	unsigned int page_idx;
 	struct qdf_mem_dma_page_t *dma_pages;
 
+	if (!pages->page_size)
+		pages->page_size = qdf_page_size;
+
 	if (cacheable) {
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
 			qdf_mem_free(pages->cacheable_pages[page_idx]);
@@ -1619,7 +1633,8 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
 	} else {
 		dma_pages = pages->dma_pages;
 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
-			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+			qdf_mem_free_consistent(
+				osdev, osdev->dev, pages->page_size,
 				dma_pages->page_v_addr_start,
 				dma_pages->page_p_addr, memctxt);
 			dma_pages++;