qcacmn: Add support for WLAN-IPA WDI2 with SMMU Stage 1
Update the WLAN-IPA WDI-2 datapath for DMA buffer sharing with SMMU Stage 1 translation support. When SMMU Stage 1 is enabled, the DMA APIs return an IO virtual address (IOVA) instead of a physical address. These IOVAs need to be mapped to physical addresses by the IPA module before the buffers are accessed.

Change-Id: I33082bc74760b0f12c348238c6f48f5ce5750172
CRs-Fixed: 2072953
committed by nshrivas

parent 59c25048cd
commit 58e0adfb53
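For context, a minimal sketch of how a caller might consume the reworked interface (illustrative only; the example function below is hypothetical and not part of this change): hif_ipa_get_ce_resource() now hands out the CE source ring as a qdf_shared_mem_t handle instead of a bare qdf_dma_addr_t, so the IPA module can resolve the underlying memory even when the DMA address is an IOVA under SMMU Stage 1.

/* Sketch: hypothetical caller of the updated hif_ipa_get_ce_resource() */
static void example_get_ipa_ce_resource(struct hif_opaque_softc *hif_ctx)
{
        qdf_shared_mem_t *ce_sr;        /* shared-mem handle: vaddr + mem_info */
        uint32_t ce_sr_ring_size;
        qdf_dma_addr_t ce_reg_paddr;

        /* ce_sr replaces the old ce_sr_base_paddr output parameter */
        hif_ipa_get_ce_resource(hif_ctx, &ce_sr, &ce_sr_ring_size,
                                &ce_reg_paddr);

        /*
         * With SMMU Stage 1 enabled, the DMA address held in ce_sr->mem_info
         * is an IOVA; the IPA module is expected to map it before use.
         */
}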
@@ -282,27 +282,22 @@ struct txrx_pdev_cfg_param_t {
 * ol_txrx_ipa_resources - Resources needed for IPA
 */
struct ol_txrx_ipa_resources {
        qdf_dma_addr_t ce_sr_base_paddr;
        qdf_shared_mem_t *ce_sr;
        uint32_t ce_sr_ring_size;
        qdf_dma_addr_t ce_reg_paddr;

        qdf_dma_addr_t tx_comp_ring_base_paddr;
        uint32_t tx_comp_ring_size;
        qdf_shared_mem_t *tx_comp_ring;
        uint32_t tx_num_alloc_buffer;

        qdf_dma_addr_t rx_rdy_ring_base_paddr;
        uint32_t rx_rdy_ring_size;
        qdf_dma_addr_t rx_proc_done_idx_paddr;
        void *rx_proc_done_idx_vaddr;
        qdf_shared_mem_t *rx_rdy_ring;
        qdf_shared_mem_t *rx_proc_done_idx;

        qdf_dma_addr_t rx2_rdy_ring_base_paddr;
        uint32_t rx2_rdy_ring_size;
        qdf_dma_addr_t rx2_proc_done_idx_paddr;
        void *rx2_proc_done_idx_vaddr;
        qdf_shared_mem_t *rx2_rdy_ring;
        qdf_shared_mem_t *rx2_proc_done_idx;

        /* IPA UC doorbell registers paddr */
        qdf_dma_addr_t tx_comp_doorbell_paddr;
        qdf_dma_addr_t rx_ready_doorbell_paddr;
        qdf_dma_addr_t tx_comp_doorbell_dmaaddr;
        qdf_dma_addr_t rx_ready_doorbell_dmaaddr;

        uint32_t tx_pipe_handle;
        uint32_t rx_pipe_handle;
@@ -522,7 +522,7 @@ static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
                             qdf_dma_addr_t *ce_sr_base_paddr,
                             qdf_shared_mem_t **ce_sr,
                             uint32_t *ce_sr_ring_size,
                             qdf_dma_addr_t *ce_reg_paddr);

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
@@ -447,14 +447,14 @@ struct ce_sendlist {

#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
                         qdf_dma_addr_t *ce_sr_base_paddr,
                         qdf_shared_mem_t **ce_sr,
                         uint32_t *ce_sr_ring_size,
                         qdf_dma_addr_t *ce_reg_paddr);
#else
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
@@ -467,7 +467,7 @@ void ce_ipa_get_resource(struct CE_handle *ce,
 * Return: None
 */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
                                       qdf_dma_addr_t *ce_sr_base_paddr,
                                       qdf_shared_mem_t **ce_sr,
                                       uint32_t *ce_sr_ring_size,
                                       qdf_dma_addr_t *ce_reg_paddr)
{
@@ -698,6 +698,104 @@ static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
        QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @ce_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries should be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                                     qdf_dma_addr_t *base_addr,
                                     struct CE_ring_state *ce_ring,
                                     unsigned int nentries, uint32_t desc_size)
{
        if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
                scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
                        nentries * desc_size + CE_DESC_RING_ALIGN);
                if (!scn->ipa_ce_ring) {
                        HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
                                  __func__);
                        return QDF_STATUS_E_NOMEM;
                }
                *base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
                                                  &scn->ipa_ce_ring->mem_info);
                ce_ring->base_addr_owner_space_unaligned =
                        scn->ipa_ce_ring->vaddr;
        } else {
                ce_ring->base_addr_owner_space_unaligned =
                        qdf_mem_alloc_consistent(scn->qdf_dev,
                                                 scn->qdf_dev->dev,
                                                 (nentries * desc_size +
                                                  CE_DESC_RING_ALIGN),
                                                 base_addr);
                if (!ce_ring->base_addr_owner_space_unaligned) {
                        HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
                                  __func__, CE_id);
                        return QDF_STATUS_E_NOMEM;
                }
        }
        return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @ce_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                              struct CE_ring_state *ce_ring, uint32_t desc_size)
{
        if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
                qdf_mem_shared_mem_free(scn->qdf_dev,
                                        scn->ipa_ce_ring);
                ce_ring->base_addr_owner_space_unaligned = NULL;
        } else {
                qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
                        ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
                        ce_ring->base_addr_owner_space_unaligned,
                        ce_ring->base_addr_CE_space, 0);
                ce_ring->base_addr_owner_space_unaligned = NULL;
        }
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                                     qdf_dma_addr_t *base_addr,
                                     struct CE_ring_state *ce_ring,
                                     unsigned int nentries, uint32_t desc_size)
{
        ce_ring->base_addr_owner_space_unaligned =
                qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
                                         (nentries * desc_size +
                                          CE_DESC_RING_ALIGN), base_addr);
        if (!ce_ring->base_addr_owner_space_unaligned) {
                HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
                          __func__, CE_id);
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
                              struct CE_ring_state *ce_ring, uint32_t desc_size)
{
        qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
                ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
                ce_ring->base_addr_owner_space_unaligned,
                ce_ring->base_addr_CE_space, 0);
        ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/**
 * ce_srng_based() - Does this target use srng
 * @ce_state : pointer to the state context of the CE
@@ -790,15 +888,10 @@ static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
                /* Legacy platforms that do not support cache
                 * coherent DMA are unsupported
                 */
                ce_ring->base_addr_owner_space_unaligned =
                        qdf_mem_alloc_consistent(scn->qdf_dev,
                                                 scn->qdf_dev->dev,
                                                 (nentries *
                                                  desc_size +
                                                  CE_DESC_RING_ALIGN),
                                                 &base_addr);
                if (ce_ring->base_addr_owner_space_unaligned
                    == NULL) {
                if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
                                       ce_ring, nentries,
                                       desc_size) !=
                    QDF_STATUS_SUCCESS) {
                        HIF_ERROR("%s: ring has no DMA mem",
                                  __func__);
                        qdf_mem_free(ptr);
@@ -1520,15 +1613,9 @@ void ce_fini(struct CE_handle *copyeng)
                if (CE_state->src_ring->shadow_base_unaligned)
                        qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
                if (CE_state->src_ring->base_addr_owner_space_unaligned)
                        qdf_mem_free_consistent(scn->qdf_dev,
                                                scn->qdf_dev->dev,
                                                (CE_state->src_ring->nentries *
                                                 desc_size +
                                                 CE_DESC_RING_ALIGN),
                                                CE_state->src_ring->
                                                base_addr_owner_space_unaligned,
                                                CE_state->src_ring->
                                                base_addr_CE_space, 0);
                ce_free_desc_ring(scn, CE_state->id,
                                  CE_state->src_ring,
                                  desc_size);
                qdf_mem_free(CE_state->src_ring);
        }
        if (CE_state->dest_ring) {
@@ -1537,15 +1624,9 @@ void ce_fini(struct CE_handle *copyeng)

                desc_size = ce_get_desc_size(scn, CE_RING_DEST);
                if (CE_state->dest_ring->base_addr_owner_space_unaligned)
                        qdf_mem_free_consistent(scn->qdf_dev,
                                                scn->qdf_dev->dev,
                                                (CE_state->dest_ring->nentries *
                                                 desc_size +
                                                 CE_DESC_RING_ALIGN),
                                                CE_state->dest_ring->
                                                base_addr_owner_space_unaligned,
                                                CE_state->dest_ring->
                                                base_addr_CE_space, 0);
                ce_free_desc_ring(scn, CE_state->id,
                                  CE_state->dest_ring,
                                  desc_size);
                qdf_mem_free(CE_state->dest_ring);

                /* epping */
@@ -1563,15 +1644,9 @@ void ce_fini(struct CE_handle *copyeng)

                desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
                if (CE_state->status_ring->base_addr_owner_space_unaligned)
                        qdf_mem_free_consistent(scn->qdf_dev,
                                                scn->qdf_dev->dev,
                                                (CE_state->status_ring->nentries *
                                                 desc_size +
                                                 CE_DESC_RING_ALIGN),
                                                CE_state->status_ring->
                                                base_addr_owner_space_unaligned,
                                                CE_state->status_ring->
                                                base_addr_CE_space, 0);
                ce_free_desc_ring(scn, CE_state->id,
                                  CE_state->status_ring,
                                  desc_size);
                qdf_mem_free(CE_state->status_ring);
        }

@@ -2840,7 +2915,7 @@ qdf_export_symbol(hif_ce_fastpath_cb_register);
 * Return: None
 */
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
                                qdf_dma_addr_t *ce_sr_base_paddr,
                                qdf_shared_mem_t **ce_sr,
                                uint32_t *ce_sr_ring_size,
                                qdf_dma_addr_t *ce_reg_paddr)
{
@@ -2849,7 +2924,7 @@ void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
                &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
        struct CE_handle *ce_hdl = pipe_info->ce_hdl;

        ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
        ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
                            ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */

@@ -195,13 +195,13 @@ hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base,

#ifdef IPA_OFFLOAD
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
                                qdf_dma_addr_t *ce_sr_base_paddr,
                                qdf_shared_mem_t **ce_sr,
                                uint32_t *ce_sr_ring_size,
                                qdf_dma_addr_t *ce_reg_paddr);
#else
static inline
void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
                                qdf_dma_addr_t *ce_sr_base_paddr,
                                qdf_shared_mem_t **ce_sr,
                                uint32_t *ce_sr_ring_size,
                                qdf_dma_addr_t *ce_reg_paddr)
{
@@ -2511,7 +2511,7 @@ qdf_export_symbol(ce_check_rx_pending);
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr_base_paddr: copyengine source ring base physical address
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
@@ -2524,7 +2524,7 @@ qdf_export_symbol(ce_check_rx_pending);
 * Return: None
 */
void ce_ipa_get_resource(struct CE_handle *ce,
                         qdf_dma_addr_t *ce_sr_base_paddr,
                         qdf_shared_mem_t **ce_sr,
                         uint32_t *ce_sr_ring_size,
                         qdf_dma_addr_t *ce_reg_paddr)
{
@@ -2535,7 +2535,8 @@ void ce_ipa_get_resource(struct CE_handle *ce,
        struct hif_softc *scn = CE_state->scn;

        if (CE_UNUSED == CE_state->state) {
                *ce_sr_base_paddr = 0;
                *qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
                        &CE_state->scn->ipa_ce_ring->mem_info) = 0;
                *ce_sr_ring_size = 0;
                return;
        }
@@ -2552,7 +2553,7 @@ void ce_ipa_get_resource(struct CE_handle *ce,
        /* Get BAR address */
        hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);

        *ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
        *ce_sr = CE_state->scn->ipa_ce_ring;
        *ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
                                      sizeof(struct CE_src_desc));
        *ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
@@ -299,14 +299,14 @@ void hif_dummy_dump_target_memory(struct hif_softc *hif_sc, void *ramdump_base,
/**
 * hif_dummy_ipa_get_ce_resource - dummy call
 * @scn: HIF context
 * @sr_base_paddr: source base address
 * @ce_sr: copyengine source ring resource info
 * @sr_ring_size: source ring size
 * @reg_paddr: bus physical address
 *
 * Return: None
 */
void hif_dummy_ipa_get_ce_resource(struct hif_softc *hif_sc,
                                   qdf_dma_addr_t *sr_base_paddr,
                                   qdf_shared_mem_t **ce_sr,
                                   uint32_t *sr_ring_size,
                                   qdf_dma_addr_t *reg_paddr)
{
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
@@ -56,7 +56,7 @@ int hif_dummy_dump_registers(struct hif_softc *hif_sc);
void hif_dummy_dump_target_memory(struct hif_softc *hif_sc, void *ramdump_base,
                                  uint32_t address, uint32_t size);
void hif_dummy_ipa_get_ce_resource(struct hif_softc *hif_sc,
                                   qdf_dma_addr_t *sr_base_paddr,
                                   qdf_shared_mem_t **ce_sr,
                                   uint32_t *sr_ring_size,
                                   qdf_dma_addr_t *reg_paddr);
void hif_dummy_mask_interrupt_call(struct hif_softc *hif_sc);
@@ -351,13 +351,13 @@ void hif_dump_target_memory(struct hif_opaque_softc *hif_hdl,
}

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_hdl,
                             qdf_dma_addr_t *ce_sr_base_paddr,
                             qdf_shared_mem_t **ce_sr,
                             uint32_t *ce_sr_ring_size,
                             qdf_dma_addr_t *ce_reg_paddr)
{
        struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_hdl);

        hif_sc->bus_ops.hif_ipa_get_ce_resource(hif_sc, ce_sr_base_paddr,
        hif_sc->bus_ops.hif_ipa_get_ce_resource(hif_sc, ce_sr,
                                                ce_sr_ring_size, ce_reg_paddr);
}

@@ -74,7 +74,7 @@ struct hif_bus_ops {
                                       void *ramdump_base,
                                       uint32_t address, uint32_t size);
        void (*hif_ipa_get_ce_resource)(struct hif_softc *hif_sc,
                                        qdf_dma_addr_t *sr_base_paddr,
                                        qdf_shared_mem_t **ce_sr,
                                        uint32_t *sr_ring_size,
                                        qdf_dma_addr_t *reg_paddr);
        void (*hif_mask_interrupt_call)(struct hif_softc *hif_sc);
@@ -199,6 +199,9 @@ struct hif_softc {
#if HIF_CE_DEBUG_DATA_BUF
        struct ce_desc_hist hif_ce_desc_hist;
#endif /* #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
#ifdef IPA_OFFLOAD
        qdf_shared_mem_t *ipa_ce_ring;
#endif
};

static inline void *hif_get_hal_handle(void *hif_hdl)
@@ -237,12 +237,12 @@ static inline int hif_snoc_get_target_type(struct hif_softc *ol_sc,
}

#ifdef IPA_OFFLOAD
static int hif_set_dma_coherent_mask(struct device *dev)
static int hif_set_dma_coherent_mask(qdf_device_t osdev)
{
        uint8_t addr_bits;

        if (false == hif_get_ipa_present())
                return qdf_set_dma_coherent_mask(dev,
                return qdf_set_dma_coherent_mask(osdev->dev,
                                                 DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE);

        if (hif_get_ipa_hw_type() < IPA_HW_v3_0)
@@ -250,12 +250,12 @@ static int hif_set_dma_coherent_mask(struct device *dev)
        else
                addr_bits = DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE;

        return qdf_set_dma_coherent_mask(dev, addr_bits);
        return qdf_set_dma_coherent_mask(osdev->dev, addr_bits);
}
#else
static int hif_set_dma_coherent_mask(struct device *dev)
static int hif_set_dma_coherent_mask(qdf_device_t osdev)
{
        return qdf_set_dma_coherent_mask(dev, 37);
        return qdf_set_dma_coherent_mask(osdev->dev, 37);
}
#endif

@@ -282,7 +282,7 @@ QDF_STATUS hif_snoc_enable_bus(struct hif_softc *ol_sc,
                return QDF_STATUS_E_NOMEM;
        }

        ret = hif_set_dma_coherent_mask(dev);
        ret = hif_set_dma_coherent_mask(ol_sc->qdf_dev);
        if (ret) {
                HIF_ERROR("%s: failed to set dma mask error = %d",
                          __func__, ret);
@@ -945,17 +945,15 @@ void *htc_get_targetdef(HTC_HANDLE htc_handle)
 * Return: None
 */
void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle,
                             qdf_dma_addr_t *ce_sr_base_paddr,
                             qdf_shared_mem_t **ce_sr,
                             uint32_t *ce_sr_ring_size,
                             qdf_dma_addr_t *ce_reg_paddr)
{
        HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle);

        if (target->hif_dev != NULL) {
        if (target->hif_dev)
                hif_ipa_get_ce_resource(target->hif_dev,
                                        ce_sr_base_paddr,
                                        ce_sr_ring_size, ce_reg_paddr);
        }
                                        ce_sr, ce_sr_ring_size, ce_reg_paddr);
}
#endif /* IPA_OFFLOAD */

@@ -747,14 +747,12 @@ void htc_vote_link_down(HTC_HANDLE HTCHandle);
void htc_vote_link_up(HTC_HANDLE HTCHandle);
#ifdef IPA_OFFLOAD
void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle,
                             qdf_dma_addr_t *ce_sr_base_paddr,
                             qdf_shared_mem_t **ce_sr,
                             uint32_t *ce_sr_ring_size,
                             qdf_dma_addr_t *ce_reg_paddr);
#else
#define htc_ipa_get_ce_resource(htc_handle, \
                                ce_sr_base_paddr, \
                                ce_sr_ring_size, \
                                ce_reg_paddr) /* NO-OP */
                                ce_sr, ce_sr_ring_size, ce_reg_paddr) /* NO-OP */
#endif /* IPA_OFFLOAD */

#if defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT)