Merge "qcacmn: Break up DMA mem alloc for HW desc banks in multi pages"
commit 16b11006ed, committed by Gerrit - the friendly Code Review server
dp_internal.h
@@ -2054,4 +2054,34 @@ void dp_is_hw_dbs_enable(struct dp_soc *soc,
 #if defined(WLAN_SUPPORT_RX_FISA)
 void dp_rx_dump_fisa_table(struct dp_soc *soc);
 #endif /* WLAN_SUPPORT_RX_FISA */
+
+#ifdef MAX_ALLOC_PAGE_SIZE
+/**
+ * dp_set_max_page_size() - Set the max page size for hw link desc.
+ * For MCL the page size is set to OS defined value and for WIN
+ * the page size is set to the max_alloc_size cfg ini
+ * param.
+ * This is to ensure that WIN gets contiguous memory allocations
+ * as per requirement.
+ * @pages: link desc page handle
+ * @max_alloc_size: max_alloc_size
+ *
+ * Return: None
+ */
+static inline
+void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
+                          uint32_t max_alloc_size)
+{
+    pages->page_size = qdf_page_size;
+}
+
+#else
+static inline
+void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
+                          uint32_t max_alloc_size)
+{
+    pages->page_size = max_alloc_size;
+}
+#endif /* MAX_ALLOC_PAGE_SIZE */
+
 #endif /* #ifndef _DP_INTERNAL_H_ */
dp_main.c
@@ -2098,12 +2098,16 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
     uint32_t total_link_descs, total_mem_size;
     uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
     uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
-    uint32_t num_link_desc_banks;
-    uint32_t last_bank_size = 0;
     uint32_t entry_size, num_entries;
     int i;
-    uint32_t desc_id = 0;
+    uint32_t cookie = 0;
     qdf_dma_addr_t *baseaddr = NULL;
+    uint32_t page_idx = 0;
+    struct qdf_mem_multi_page_t *pages;
+    struct qdf_mem_dma_page_t *dma_pages;
+    uint32_t offset = 0;
+    uint32_t count = 0;
+    uint32_t num_descs_per_page;
 
     /* Only Tx queue descriptors are allocated from common link descriptor
      * pool Rx queue descriptors are not included in this because (REO queue
@@ -2138,98 +2142,28 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
 
     total_mem_size += link_desc_align;
 
-    if (total_mem_size <= max_alloc_size) {
-        num_link_desc_banks = 0;
-        last_bank_size = total_mem_size;
-    } else {
-        num_link_desc_banks = (total_mem_size) /
-            (max_alloc_size - link_desc_align);
-        last_bank_size = total_mem_size %
-            (max_alloc_size - link_desc_align);
-    }
-
     QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
-          FL("total_mem_size: %d, num_link_desc_banks: %u"),
-          total_mem_size, num_link_desc_banks);
+          FL("total_mem_size: %d"), total_mem_size);
 
-    for (i = 0; i < num_link_desc_banks; i++) {
-        if (!dp_is_soc_reinit(soc)) {
-            baseaddr = &soc->link_desc_banks[i].
-                    base_paddr_unaligned;
-            soc->link_desc_banks[i].base_vaddr_unaligned =
-                qdf_mem_alloc_consistent(soc->osdev,
-                                         soc->osdev->dev,
-                                         max_alloc_size,
-                                         baseaddr);
-        }
-        soc->link_desc_banks[i].size = max_alloc_size;
-
-        soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
-            soc->link_desc_banks[i].base_vaddr_unaligned) +
-            ((unsigned long)(
-            soc->link_desc_banks[i].base_vaddr_unaligned) %
-            link_desc_align));
-
-        soc->link_desc_banks[i].base_paddr = (unsigned long)(
-            soc->link_desc_banks[i].base_paddr_unaligned) +
-            ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
-            (unsigned long)(
-            soc->link_desc_banks[i].base_vaddr_unaligned));
-
-        if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
-            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-                FL("Link descriptor memory alloc failed"));
-            goto fail;
-        }
-        if (!dp_is_soc_reinit(soc)) {
-            qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
-                             soc->link_desc_banks[i].size,
-                             "link_desc_bank");
-        }
-        qdf_minidump_log((soc->link_desc_banks[i].base_vaddr),
-                         soc->link_desc_banks[i].size,
-                         "link_desc_bank");
-    }
-
-    if (last_bank_size) {
-        /* Allocate last bank in case total memory required is not exact
-         * multiple of max_alloc_size
-         */
-        if (!dp_is_soc_reinit(soc)) {
-            baseaddr = &soc->link_desc_banks[i].
-                    base_paddr_unaligned;
-            soc->link_desc_banks[i].base_vaddr_unaligned =
-                qdf_mem_alloc_consistent(soc->osdev,
-                                         soc->osdev->dev,
-                                         last_bank_size,
-                                         baseaddr);
-        }
-        soc->link_desc_banks[i].size = last_bank_size;
-
-        soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
-            (soc->link_desc_banks[i].base_vaddr_unaligned) +
-            ((unsigned long)(
-            soc->link_desc_banks[i].base_vaddr_unaligned) %
-            link_desc_align));
-
-        soc->link_desc_banks[i].base_paddr =
-            (unsigned long)(
-            soc->link_desc_banks[i].base_paddr_unaligned) +
-            ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
-            (unsigned long)(
-            soc->link_desc_banks[i].base_vaddr_unaligned));
-
-        if (!dp_is_soc_reinit(soc)) {
-            qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
-                             soc->link_desc_banks[i].size,
-                             "link_desc_bank");
-        }
-        qdf_minidump_log((soc->link_desc_banks[i].base_vaddr),
-                         soc->link_desc_banks[i].size,
-                         "link_desc_bank");
-    }
-
+    pages = &soc->link_desc_pages;
+    dp_set_max_page_size(pages, max_alloc_size);
+    if (!dp_is_soc_reinit(soc)) {
+        qdf_mem_multi_pages_alloc(soc->osdev,
+                                  pages,
+                                  link_desc_size,
+                                  total_link_descs,
+                                  0, false);
+        if (!pages->num_pages) {
+            dp_err("Multi page alloc fail for hw link desc pool");
+            goto fail_page_alloc;
+        }
+        qdf_minidump_log(
+            (void *)(pages->dma_pages->page_v_addr_start),
+            pages->num_pages *
+            sizeof(struct qdf_mem_dma_page_t),
+            "hw_link_desc_bank");
+    }
 
     /* Allocate and setup link descriptor idle list for HW internal use */
     entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
     total_mem_size = entry_size * total_link_descs;
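For contrast, a self-contained sketch of the bookkeeping that disappears in this hunk: the old full-bank/tail-bank split versus the single page count the new path needs (all sizes illustrative):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t link_desc_size = 128;      /* illustrative */
    uint32_t link_desc_align = 128;     /* illustrative */
    uint32_t total_link_descs = 32768;  /* illustrative */
    uint32_t max_alloc_size = 2 << 20;  /* 2 MB, illustrative */
    uint32_t total_mem_size =
        total_link_descs * link_desc_size + link_desc_align;

    /* Old scheme: fixed banks plus a remainder bank, each needing
     * its own alignment fix-up after allocation. */
    uint32_t banks = total_mem_size / (max_alloc_size - link_desc_align);
    uint32_t last_bank = total_mem_size % (max_alloc_size - link_desc_align);

    /* New scheme: uniform pages, no per-bank alignment arithmetic. */
    uint32_t per_page = max_alloc_size / link_desc_size;
    uint32_t num_pages = (total_link_descs + per_page - 1) / per_page;

    printf("old: %u full banks + %u byte tail\n", banks, last_bank);
    printf("new: %u pages of %u descs\n", num_pages, per_page);
    return 0;
}
```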
@@ -2250,27 +2184,27 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
 
     hal_srng_access_start_unlocked(soc->hal_soc,
         soc->wbm_idle_link_ring.hal_srng);
 
-    for (i = 0; i < MAX_LINK_DESC_BANKS &&
-        soc->link_desc_banks[i].base_paddr; i++) {
-        uint32_t num_entries = (soc->link_desc_banks[i].size -
-            ((unsigned long)(
-            soc->link_desc_banks[i].base_vaddr) -
-            (unsigned long)(
-            soc->link_desc_banks[i].base_vaddr_unaligned)))
-            / link_desc_size;
-        unsigned long paddr = (unsigned long)(
-            soc->link_desc_banks[i].base_paddr);
-
-        while (num_entries && (desc = hal_srng_src_get_next(
-            soc->hal_soc,
-            soc->wbm_idle_link_ring.hal_srng))) {
-            hal_set_link_desc_addr(desc,
-                LINK_DESC_COOKIE(desc_id, i), paddr);
-            num_entries--;
-            desc_id++;
-            paddr += link_desc_size;
-        }
-    }
+    page_idx = 0; count = 0;
+    offset = 0;
+    pages = &soc->link_desc_pages;
+    if (pages->dma_pages)
+        dma_pages = pages->dma_pages;
+    else
+        goto fail;
+    num_descs_per_page =
+        pages->num_element_per_page;
+    while ((desc = hal_srng_src_get_next(
+            soc->hal_soc,
+            soc->wbm_idle_link_ring.hal_srng)) &&
+            (count < total_link_descs)) {
+        page_idx = count / num_descs_per_page;
+        offset = count % num_descs_per_page;
+        cookie = LINK_DESC_COOKIE(count, page_idx);
+        hal_set_link_desc_addr(
+                desc, cookie,
+                dma_pages[page_idx].page_p_addr
+                + (offset * link_desc_size));
+        count++;
+    }
     hal_srng_access_end_unlocked(soc->hal_soc,
         soc->wbm_idle_link_ring.hal_srng);
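The new fill loop addresses descriptors by a single running count; the page index and in-page offset fall out by division and modulus. A minimal standalone illustration (16 descriptors per page and the 128-byte stride are assumed values):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t num_descs_per_page = 16;   /* assumed for illustration */
    uint32_t link_desc_size = 128;      /* assumed for illustration */

    for (uint32_t count = 0; count < 40; count += 17) {
        uint32_t page_idx = count / num_descs_per_page;
        uint32_t offset = count % num_descs_per_page;

        /* physical address = dma_pages[page_idx].page_p_addr
         *                    + offset * link_desc_size */
        printf("desc %2u -> page %u, byte offset %u\n",
               count, page_idx, offset * link_desc_size);
    }
    return 0;
}
```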
@@ -2322,40 +2256,38 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
     scatter_buf_ptr = (uint8_t *)(
         soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
     rem_entries = num_entries_per_buf;
 
-    for (i = 0; i < MAX_LINK_DESC_BANKS &&
-        soc->link_desc_banks[i].base_paddr; i++) {
-        uint32_t num_link_descs =
-            (soc->link_desc_banks[i].size -
-            ((unsigned long)(
-            soc->link_desc_banks[i].base_vaddr) -
-            (unsigned long)(
-            soc->link_desc_banks[i].base_vaddr_unaligned)))
-            / link_desc_size;
-        unsigned long paddr = (unsigned long)(
-            soc->link_desc_banks[i].base_paddr);
+    pages = &soc->link_desc_pages;
+    page_idx = 0; count = 0;
+    offset = 0;
+    num_descs_per_page =
+        pages->num_element_per_page;
+    if (pages->dma_pages)
+        dma_pages = pages->dma_pages;
+    else
+        goto fail;
 
-        while (num_link_descs) {
-            hal_set_link_desc_addr((void *)scatter_buf_ptr,
-                LINK_DESC_COOKIE(desc_id, i), paddr);
-            num_link_descs--;
-            desc_id++;
-            paddr += link_desc_size;
-            rem_entries--;
-            if (rem_entries) {
-                scatter_buf_ptr += entry_size;
-            } else {
-                rem_entries = num_entries_per_buf;
-                scatter_buf_num++;
-
-                if (scatter_buf_num >= num_scatter_bufs)
-                    break;
-
-                scatter_buf_ptr = (uint8_t *)(
-                    soc->wbm_idle_scatter_buf_base_vaddr[
-                    scatter_buf_num]);
-            }
-        }
-    }
+    while (count < total_link_descs) {
+        page_idx = count / num_descs_per_page;
+        offset = count % num_descs_per_page;
+        cookie = LINK_DESC_COOKIE(count, page_idx);
+        hal_set_link_desc_addr(
+                (void *)scatter_buf_ptr,
+                cookie,
+                dma_pages[page_idx].page_p_addr +
+                (offset * link_desc_size));
+        rem_entries--;
+        if (rem_entries) {
+            scatter_buf_ptr += entry_size;
+        } else {
+            rem_entries = num_entries_per_buf;
+            scatter_buf_num++;
+            if (scatter_buf_num >= num_scatter_bufs)
+                break;
+            scatter_buf_ptr =
+                (uint8_t *)
+                (soc->wbm_idle_scatter_buf_base_vaddr[
+                scatter_buf_num]);
+        }
+        count++;
+    }
     /* Setup link descriptor idle list in HW */
     hal_setup_link_idle_list(soc->hal_soc,
@@ -2384,17 +2316,15 @@ fail:
         }
     }
 
-    for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
-        if (soc->link_desc_banks[i].base_vaddr_unaligned) {
-            qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
-                soc->link_desc_banks[i].size,
-                soc->link_desc_banks[i].base_vaddr_unaligned,
-                soc->link_desc_banks[i].base_paddr_unaligned,
-                0);
-            soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
-        }
-    }
+    pages = &soc->link_desc_pages;
+    qdf_minidump_remove(
+        (void *)pages->dma_pages->page_v_addr_start);
+    qdf_mem_multi_pages_free(soc->osdev,
+                             pages, 0, false);
     return QDF_STATUS_E_FAILURE;
+
+fail_page_alloc:
+    return QDF_STATUS_E_FAULT;
 }
 
 /*
@@ -2403,6 +2333,7 @@ fail:
 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
 {
     int i;
+    struct qdf_mem_multi_page_t *pages;
 
     if (soc->wbm_idle_link_ring.hal_srng) {
         qdf_minidump_remove(
@@ -2421,17 +2352,11 @@ static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
         }
     }
 
-    for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
-        if (soc->link_desc_banks[i].base_vaddr_unaligned) {
-            qdf_minidump_remove(soc->link_desc_banks[i].base_vaddr);
-            qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
-                soc->link_desc_banks[i].size,
-                soc->link_desc_banks[i].base_vaddr_unaligned,
-                soc->link_desc_banks[i].base_paddr_unaligned,
-                0);
-            soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
-        }
-    }
+    pages = &soc->link_desc_pages;
+    qdf_minidump_remove(
+        (void *)pages->dma_pages->page_v_addr_start);
+    qdf_mem_multi_pages_free(soc->osdev,
+                             pages, 0, false);
 }
 
 #ifdef IPA_OFFLOAD
dp_rx.h
@@ -688,8 +688,9 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
  * the MSDU Link Descriptor
  * @soc: core txrx main context
- * @buf_info: buf_info include cookie that used to lookup virtual address of
- * link descriptor Normally this is just an index into a per SOC array.
+ * @buf_info: buf_info includes cookie that is used to lookup
+ * virtual address of link descriptor after deriving the page id
+ * and the offset or index of the desc on the associated page.
  *
  * This is the VA of the link descriptor, that HAL layer later uses to
  * retrieve the list of MSDU's for a given MPDU.
@@ -701,16 +702,16 @@ void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
     struct hal_buf_info *buf_info)
 {
     void *link_desc_va;
-    uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);
-
+    struct qdf_mem_multi_page_t *pages;
+    uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
 
     /* TODO */
     /* Add sanity for cookie */
-
-    link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
-        (buf_info->paddr -
-            soc->link_desc_banks[bank_id].base_paddr);
-
+    pages = &soc->link_desc_pages;
+    if (!pages)
+        return NULL;
+    if (qdf_unlikely(page_id >= pages->num_pages))
+        return NULL;
+    link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
+        (buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
     return link_desc_va;
 }
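A standalone model of the new cookie-to-VA path: the offset of buf_info->paddr within its DMA page is applied to that page's kernel mapping, so pages no longer need to be physically contiguous with one another (all addresses below are fabricated for illustration):

```c
#include <stdint.h>
#include <stdio.h>

struct dma_page {               /* stand-in for qdf_mem_dma_page_t */
    char *page_v_addr_start;
    uint64_t page_p_addr;
};

int main(void)
{
    static char backing[2][4096];
    struct dma_page pages[2] = {
        { backing[0], 0x80000000ULL },
        { backing[1], 0x9a000000ULL }, /* pages need not be contiguous */
    };
    uint64_t paddr = 0x9a000000ULL + 3 * 128; /* desc 3 of page 1 */
    uint16_t page_id = 1;                     /* from the cookie */

    char *va = pages[page_id].page_v_addr_start +
               (paddr - pages[page_id].page_p_addr);
    printf("va offset into page = %td\n", va - backing[1]); /* 384 */
    return 0;
}
```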
dp_types.h
@@ -85,7 +85,6 @@
 #define MAX_VDEV_CNT 51
 #endif
 
-#define MAX_LINK_DESC_BANKS 8
 #define MAX_TXDESC_POOLS 4
 #define MAX_RXDESC_POOLS 4
 #define MAX_REO_DEST_RINGS 4

@@ -961,14 +960,8 @@ struct dp_soc {
     /* Device ID coming from Bus sub-system */
     uint32_t device_id;
 
-    /* Link descriptor memory banks */
-    struct {
-        void *base_vaddr_unaligned;
-        void *base_vaddr;
-        qdf_dma_addr_t base_paddr_unaligned;
-        qdf_dma_addr_t base_paddr;
-        uint32_t size;
-    } link_desc_banks[MAX_LINK_DESC_BANKS];
+    /* Link descriptor pages */
+    struct qdf_mem_multi_page_t link_desc_pages;
 
     /* Link descriptor Idle list for HW internal use (SRNG mode) */
     struct dp_srng wbm_idle_link_ring;
@@ -1280,20 +1273,36 @@ struct dp_ipa_resources {
 #define DP_NAC_MAX_CLIENT  24
 
 /*
  * Macros to setup link descriptor cookies - for link descriptors, we just
- * need first 3 bits to store bank ID. The remaining bytes will be used set a
- * unique ID, which will be useful in debugging
+ * need first 3 bits to store bank/page ID for WIN. The
+ * remaining bytes will be used to set a unique ID, which will
+ * be useful in debugging
+ * 24 bits cookie size
+ * 10 bits page id 0 ~ 1023 for MCL
+ * 3 bits page id 0 ~ 7 for WIN
+ * WBM Idle List Desc size = 128,
+ * Num descs per page = 4096/128 = 32 for MCL
+ * Num descs per page = 2MB/128 = 16384 for WIN
  */
-#define LINK_DESC_BANK_ID_MASK 0x7
-#define LINK_DESC_ID_SHIFT 3
+#ifdef MAX_ALLOC_PAGE_SIZE
+#define LINK_DESC_PAGE_ID_MASK 0x007FE0
+#define LINK_DESC_ID_SHIFT 5
+#define LINK_DESC_COOKIE(_desc_id, _page_id) \
+    ((((_page_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_desc_id))
+#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
+    (((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
+#else
+#define LINK_DESC_PAGE_ID_MASK 0x7
+#define LINK_DESC_ID_SHIFT 3
+#define LINK_DESC_COOKIE(_desc_id, _page_id) \
+    ((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_page_id))
+#define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
+    ((_cookie) & LINK_DESC_PAGE_ID_MASK)
+#endif
 #define LINK_DESC_ID_START 0x8000
 
-#define LINK_DESC_COOKIE(_desc_id, _bank_id) \
-    ((((_desc_id) + LINK_DESC_ID_START) << LINK_DESC_ID_SHIFT) | (_bank_id))
-
-#define LINK_DESC_COOKIE_BANK_ID(_cookie) \
-    ((_cookie) & LINK_DESC_BANK_ID_MASK)
-
 /* same as ieee80211_nac_param */
 enum dp_nac_param_cmd {
     /* IEEE80211_NAC_PARAM_ADD */
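A quick self-check of the two cookie encodings above (masks and LINK_DESC_ID_START copied from the hunk; the desc/page values are arbitrary). The MCL variant keeps the descriptor index in the low 5 bits and the page id in bits 5..14; the WIN variant keeps the page id in the low 3 bits:

```c
#include <stdint.h>
#include <stdio.h>

#define LINK_DESC_ID_START 0x8000

/* MCL variant (MAX_ALLOC_PAGE_SIZE): page id in bits 5..14 */
#define MCL_COOKIE(d, p)   ((((p) + LINK_DESC_ID_START) << 5) | (d))
#define MCL_PAGE(c)        (((c) & 0x007FE0) >> 5)

/* WIN variant: desc id shifted, page id in bits 0..2 */
#define WIN_COOKIE(d, p)   ((((d) + LINK_DESC_ID_START) << 3) | (p))
#define WIN_PAGE(c)        ((c) & 0x7)

int main(void)
{
    uint32_t mcl = MCL_COOKIE(21, 700); /* desc 21 on page 700 */
    uint32_t win = WIN_COOKIE(900, 5);  /* desc 900 on page 5 */

    printf("MCL page %u (expect 700)\n", MCL_PAGE(mcl));
    printf("WIN page %u (expect 5)\n", WIN_PAGE(win));
    return 0;
}
```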
qdf_mem.h
@@ -39,6 +39,7 @@
  * Return: aligned value.
  */
 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
+#define qdf_page_size __page_size
 
 /**
  * struct qdf_mem_dma_page_t - Allocated dmaable page

@@ -64,6 +65,7 @@ struct qdf_mem_multi_page_t {
     uint16_t num_pages;
     struct qdf_mem_dma_page_t *dma_pages;
     void **cacheable_pages;
+    qdf_size_t page_size;
 };
i_qdf_mem.h
@@ -98,6 +98,7 @@ typedef struct __qdf_mempool_ctxt {
 
 #endif /* __KERNEL__ */
 
+#define __page_size ((size_t)PAGE_SIZE)
 #define __qdf_align(a, mask)   ALIGN(a, mask)
 
 #ifdef DISABLE_MEMDEBUG_PANIC
i_qdf_talloc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the

@@ -42,7 +42,5 @@
 
 #define __alloc_size(ptr) ksize(ptr)
 
-#define __page_size ((size_t)PAGE_SIZE)
-
 #endif /* __I_QDF_TALLOC_H */
qdf_mem.c
@@ -1343,10 +1343,13 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
     void **cacheable_pages = NULL;
     uint16_t i;
 
-    pages->num_element_per_page = PAGE_SIZE / element_size;
+    if (!pages->page_size)
+        pages->page_size = qdf_page_size;
+
+    pages->num_element_per_page = pages->page_size / element_size;
     if (!pages->num_element_per_page) {
         qdf_print("Invalid page %d or element size %d",
-                  (int)PAGE_SIZE, (int)element_size);
+                  (int)pages->page_size, (int)element_size);
         goto out_fail;
     }
 
@@ -1365,7 +1368,7 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
     cacheable_pages = pages->cacheable_pages;
     for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
         cacheable_pages[page_idx] = qdf_mem_malloc_debug(
-                PAGE_SIZE, func, line, caller, 0);
+                pages->page_size, func, line, caller, 0);
         if (!cacheable_pages[page_idx])
             goto page_alloc_fail;
     }
@@ -1381,7 +1384,7 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
     for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
         dma_pages->page_v_addr_start =
             qdf_mem_alloc_consistent_debug(
-                osdev, osdev->dev, PAGE_SIZE,
+                osdev, osdev->dev, pages->page_size,
                 &dma_pages->page_p_addr,
                 func, line, caller);
         if (!dma_pages->page_v_addr_start) {
@@ -1390,7 +1393,7 @@ void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
             goto page_alloc_fail;
         }
         dma_pages->page_v_addr_end =
-            dma_pages->page_v_addr_start + PAGE_SIZE;
+            dma_pages->page_v_addr_start + pages->page_size;
         dma_pages++;
     }
     pages->cacheable_pages = NULL;
@@ -1408,7 +1411,7 @@ page_alloc_fail:
     for (i = 0; i < page_idx; i++) {
         qdf_mem_free_consistent_debug(
             osdev, osdev->dev,
-            PAGE_SIZE, dma_pages->page_v_addr_start,
+            pages->page_size, dma_pages->page_v_addr_start,
             dma_pages->page_p_addr, memctxt, func, line);
         dma_pages++;
     }
@@ -1444,6 +1447,9 @@ void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
     unsigned int page_idx;
     struct qdf_mem_dma_page_t *dma_pages;
 
+    if (!pages->page_size)
+        pages->page_size = qdf_page_size;
+
     if (cacheable) {
         for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
             qdf_mem_free_debug(pages->cacheable_pages[page_idx],
@@ -1453,7 +1459,7 @@ void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
         dma_pages = pages->dma_pages;
         for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
             qdf_mem_free_consistent_debug(
-                osdev, osdev->dev, PAGE_SIZE,
+                osdev, osdev->dev, pages->page_size,
                 dma_pages->page_v_addr_start,
                 dma_pages->page_p_addr, memctxt, func, line);
             dma_pages++;
@@ -1520,10 +1526,13 @@ void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
     void **cacheable_pages = NULL;
     uint16_t i;
 
-    pages->num_element_per_page = PAGE_SIZE / element_size;
+    if (!pages->page_size)
+        pages->page_size = qdf_page_size;
+
+    pages->num_element_per_page = pages->page_size / element_size;
     if (!pages->num_element_per_page) {
         qdf_print("Invalid page %d or element size %d",
-                  (int)PAGE_SIZE, (int)element_size);
+                  (int)pages->page_size, (int)element_size);
         goto out_fail;
     }
 
@@ -1540,7 +1549,8 @@ void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
 
     cacheable_pages = pages->cacheable_pages;
     for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
-        cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
+        cacheable_pages[page_idx] =
+            qdf_mem_malloc(pages->page_size);
         if (!cacheable_pages[page_idx])
             goto page_alloc_fail;
     }
@@ -1555,7 +1565,7 @@ void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
     for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
         dma_pages->page_v_addr_start =
             qdf_mem_alloc_consistent(osdev, osdev->dev,
-                                     PAGE_SIZE,
+                                     pages->page_size,
                                      &dma_pages->page_p_addr);
         if (!dma_pages->page_v_addr_start) {
             qdf_print("dmaable page alloc fail pi %d",
@@ -1563,7 +1573,7 @@ void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
             goto page_alloc_fail;
         }
         dma_pages->page_v_addr_end =
-            dma_pages->page_v_addr_start + PAGE_SIZE;
+            dma_pages->page_v_addr_start + pages->page_size;
         dma_pages++;
     }
     pages->cacheable_pages = NULL;
@@ -1578,7 +1588,8 @@ page_alloc_fail:
     } else {
         dma_pages = pages->dma_pages;
         for (i = 0; i < page_idx; i++) {
-            qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+            qdf_mem_free_consistent(
+                osdev, osdev->dev, pages->page_size,
                 dma_pages->page_v_addr_start,
                 dma_pages->page_p_addr, memctxt);
             dma_pages++;
@@ -1612,6 +1623,9 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
     unsigned int page_idx;
     struct qdf_mem_dma_page_t *dma_pages;
 
+    if (!pages->page_size)
+        pages->page_size = qdf_page_size;
+
     if (cacheable) {
         for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
             qdf_mem_free(pages->cacheable_pages[page_idx]);
@@ -1619,7 +1633,8 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
     } else {
         dma_pages = pages->dma_pages;
        for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
-            qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+            qdf_mem_free_consistent(
+                osdev, osdev->dev, pages->page_size,
                 dma_pages->page_v_addr_start,
                 dma_pages->page_p_addr, memctxt);
             dma_pages++;
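The qdf_mem.c changes reduce to one rule: a zero page_size means "behave as before" (OS PAGE_SIZE), while a caller-set page_size (as dp_set_max_page_size() does for WIN) scales every allocation and free. A standalone mock of that defaulting rule (4096 stands in for PAGE_SIZE; the types are simplified stand-ins):

```c
#include <stdint.h>
#include <stdio.h>

#define MOCK_PAGE_SIZE 4096 /* stands in for qdf_page_size/PAGE_SIZE */

struct mock_multi_page {    /* stand-in for qdf_mem_multi_page_t */
    uint32_t page_size;
    uint16_t num_element_per_page;
};

static void mock_pages_alloc(struct mock_multi_page *pages,
                             uint32_t element_size)
{
    if (!pages->page_size)  /* legacy callers: OS page size */
        pages->page_size = MOCK_PAGE_SIZE;

    pages->num_element_per_page = pages->page_size / element_size;
}

int main(void)
{
    struct mock_multi_page legacy = { 0 };
    struct mock_multi_page win = { .page_size = 2 << 20 };

    mock_pages_alloc(&legacy, 128);
    mock_pages_alloc(&win, 128);
    printf("legacy: %u/page, win: %u/page\n",
           legacy.num_element_per_page, win.num_element_per_page);
    return 0;
}
```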