qcacld-3.0: Add IPA UC WDI 1.0 backward compatibility

Update qcacld-3.0 to add IPA UC WDI 1.0 backward compatibility
for the Rome WiFi device.

Change-Id: I33084efd6dd3434d1f6baec49de43fab75c63e7f
CRs-fixed: 952114
This commit belongs to:
Manikandan Mohan
2015-11-18 16:27:37 -08:00
Reviewed by Satish Singh
Parent aa9459f319
Commit d2f458f35c
4 files changed, 351 insertions and 114 deletions

Kbuild

@@ -914,6 +914,10 @@ CDEFINES += -DADRASTEA_SHADOW_REGISTERS
CDEFINES += -DADRASTEA_RRI_ON_DDR
endif
ifneq (y,$(filter y,$(CONFIG_CNSS_EOS) $(CONFIG_ICNSS) $(CONFIG_CNSS_ADRASTEA)))
CDEFINES += -DQCA_WIFI_2_0
endif
ifeq ($(CONFIG_WLAN_FASTPATH), y)
CDEFINES += -DWLAN_FEATURE_FASTPATH
endif
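
For orientation: with this hunk, builds that select none of CONFIG_CNSS_EOS, CONFIG_ICNSS, or CONFIG_CNSS_ADRASTEA (the Rome-class targets) now carry -DQCA_WIFI_2_0, and the HTT hunks below key their WDI 1.0 code paths off that define (the WDI 2.0-only rx2 helpers stay behind QCA_WIFI_3_0). A minimal sketch of the compile-time split, using a hypothetical HTT_IPA_WDI_VER macro purely for illustration:

/* Sketch only: HTT_IPA_WDI_VER is hypothetical; the driver tests
 * QCA_WIFI_2_0 / QCA_WIFI_3_0 directly in the files below. */
#ifdef QCA_WIFI_2_0
#define HTT_IPA_WDI_VER 1  /* Rome: WDI 1.0 message and buffer layouts */
#else
#define HTT_IPA_WDI_VER 2  /* WDI 2.0-capable targets keep the existing layouts */
#endif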


@@ -648,6 +648,100 @@ htt_h2t_aggr_cfg_msg(struct htt_pdev_t *pdev,
* Return: 0 success
* A_NO_MEMORY No memory fail
*/
#ifdef QCA_WIFI_2_0
/* Rome Support only WDI 1.0 */
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt *pkt;
cdf_nbuf_t msg;
uint32_t *msg_word;
pkt = htt_htc_pkt_alloc(pdev);
if (!pkt)
return A_NO_MEMORY;
/* show that this is not a tx frame download
* (not required, but helpful)
*/
pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
/* reserve room for HTC header */
msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
false);
if (!msg) {
htt_htc_pkt_free(pdev, pkt);
return A_NO_MEMORY;
}
/* set the length of the message */
cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
/* fill in the message contents */
msg_word = (uint32_t *) cdf_nbuf_data(msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
(unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
msg_word++;
*msg_word = 0;
HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_SET(*msg_word,
(unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
cdf_nbuf_data(msg),
cdf_nbuf_len(msg),
pdev->htc_endpoint,
1); /* tag - not relevant here */
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
return A_OK;
}
#else
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
struct htt_htc_pkt *pkt;
@@ -791,6 +885,7 @@ int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
return A_OK;
}
#endif
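
Reading back from the WDI 1.0 path above, the HTT_H2T_MSG_TYPE_WDI_IPA_CFG message is nine 32-bit words. The sketch below is only an illustrative view of those words; the driver builds the message with the HTT_WDI_IPA_CFG_* macros, and the field names here are descriptive assumptions, not taken from the headers.

#include <stdint.h>

/* Illustrative layout of the nine words written by the WDI 1.0
 * htt_h2t_ipa_uc_rsc_cfg_msg() above (field names are assumptions). */
struct wdi1_ipa_cfg_msg_sketch {
    uint32_t msg_type_and_tx_pkt_pool_size; /* HTT_H2T_MSG_TYPE_WDI_IPA_CFG + pool size */
    uint32_t tx_comp_ring_base_paddr;
    uint32_t tx_comp_ring_size;
    uint32_t tx_comp_wr_idx_paddr;
    uint32_t tx_ce_wr_idx_paddr;
    uint32_t rx_ind_ring_base_paddr;
    uint32_t rx_ind_ring_size;
    uint32_t rx_ind_rd_idx_paddr;
    uint32_t rx_ind_wr_idx_paddr;
};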
/**
* htt_h2t_ipa_uc_set_active() - Propagate WDI path enable/disable to firmware


@@ -2288,6 +2288,74 @@ fail1:
}
#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
/**
* htt_rx_ipa_uc_alloc_wdi2_rsc() - Allocate WDI2.0 resources
* @pdev: htt context
* @rx_ind_ring_elements: rx ring elements
*
* Return: 0 success
*/
int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
unsigned int rx_ind_ring_elements)
{
/* Allocate RX2 indication ring */
/* RX2 IND ring element
* 4bytes: pointer
* 2bytes: VDEV ID
* 2bytes: length */
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
cdf_os_mem_alloc_consistent(
pdev->osdev,
rx_ind_ring_elements *
sizeof(struct ipa_uc_rx_ring_elem_t),
&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ind_ring_base),
memctx));
if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
cdf_print("%s: RX IND RING alloc fail", __func__);
return -ENOBUFS;
}
/* RX indication ring size, by bytes */
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
/* Allocate RX process done index */
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
cdf_os_mem_alloc_consistent(
pdev->osdev,
4,
&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx),
memctx));
if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
cdf_os_mem_free_consistent(
pdev->osdev,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ind_ring_base),
memctx));
return -ENOBUFS;
}
cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
return 0;
}
#else
int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
unsigned int rx_ind_ring_elements)
{
return 0;
}
#endif
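
The element size used above comes from struct ipa_uc_rx_ring_elem_t; per the comment in the code (4-byte pointer, 2-byte VDEV ID, 2-byte length) each element packs into eight bytes. A sketch of that layout, with field names that are assumptions rather than the driver's own:

#include <stdint.h>

/* Assumed field names; the real struct ipa_uc_rx_ring_elem_t is
 * defined in the HTT headers. Layout follows the comment above. */
struct ipa_uc_rx_ring_elem_sketch {
    uint32_t rx_packet_paddr;   /* 4 bytes: buffer physical address */
    uint16_t vdev_id;           /* 2 bytes: VDEV ID */
    uint16_t rx_packet_len;     /* 2 bytes: length */
};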
/**
* htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
* @pdev: htt context
@@ -2298,6 +2366,12 @@ fail1:
int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
unsigned int rx_ind_ring_elements)
{
int ret = 0;
/* Allocate RX indication ring */
/* RX IND ring element
* 4bytes: pointer
* 2bytes: VDEV ID
* 2bytes: length */
pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
cdf_os_mem_alloc_consistent(
pdev->osdev,
@@ -2341,37 +2415,20 @@ int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
}
cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
cdf_os_mem_alloc_consistent(
pdev->osdev,
rx_ind_ring_elements *
sizeof(cdf_dma_addr_t),
&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ind_ring_base),
memctx));
if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
cdf_print("%s: RX IND RING alloc fail", __func__);
return -ENOBUFS;
}
ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
return ret;
}
/* RX indication ring size, by bytes */
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
rx_ind_ring_elements * sizeof(cdf_dma_addr_t);
cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
/* Allocate RX process done index */
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
cdf_os_mem_alloc_consistent(
pdev->osdev,
4,
&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx),
memctx));
if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
#ifdef QCA_WIFI_3_0
/**
* htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
* @pdev: htt context
*
* Return: None
*/
void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
{
if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
cdf_os_mem_free_consistent(
pdev->osdev,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
@@ -2380,11 +2437,26 @@ int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ind_ring_base),
memctx));
return -ENOBUFS;
}
cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
return 0;
if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
cdf_os_mem_free_consistent(
pdev->osdev,
4,
pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx),
memctx));
}
}
#else
void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
{
return;
}
#endif
int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
@@ -2406,32 +2478,12 @@ int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx.vaddr,
pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx),
memctx));
}
if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
cdf_os_mem_free_consistent(
pdev->osdev,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ind_ring_base),
memctx));
}
if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
cdf_os_mem_free_consistent(
pdev->osdev,
4,
pdev->ipa_uc_rx_rsc.
rx_ipa_prc_done_idx.vaddr,
pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
rx2_ipa_prc_done_idx),
memctx));
}
htt_rx_ipa_uc_free_wdi2_rsc(pdev);
return 0;
}
#endif /* IPA_OFFLOAD */
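
Taken together, the rx hunks above reduce htt_rx_ipa_uc_attach()/htt_rx_ipa_uc_detach() to the common WDI 1.0 resources plus a call into the new WDI 2.0-only helpers, which compile to stubs when QCA_WIFI_3_0 is not defined (e.g. Rome). A minimal sketch of that pairing; the example_* wrappers are hypothetical and only mirror the structure of the hunks above:

struct htt_pdev_t;

int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
                                 unsigned int rx_ind_ring_elements);
void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev);

/* Hypothetical wrappers, for illustration only. */
static int example_rx_uc_attach(struct htt_pdev_t *pdev,
                                unsigned int rx_ind_ring_elements)
{
    /* ... allocate the WDI 1.0 rx indication ring and
     * process-done index as in the hunk above ... */
    /* WDI 2.0-only resources; a no-op stub without QCA_WIFI_3_0 */
    return htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
}

static void example_rx_uc_detach(struct htt_pdev_t *pdev)
{
    /* ... free the WDI 1.0 resources as in the hunk above ... */
    htt_rx_ipa_uc_free_wdi2_rsc(pdev);
}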


@@ -833,6 +833,149 @@ void htt_tx_desc_display(void *tx_desc)
#endif
#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_2_0
/**
* htt_tx_ipa_uc_wdi_tx_buf_alloc() - Alloc WDI TX buffers
* @pdev: htt context
* @uc_tx_buf_sz: TX buffer size
* @uc_tx_buf_cnt: TX Buffer count
* @uc_tx_partition_base: IPA UC TX partition base value
*
* Allocate WDI TX buffers. Also note Rome supports only WDI 1.0.
*
* Return: 0 success
*/
int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
unsigned int uc_tx_buf_sz,
unsigned int uc_tx_buf_cnt,
unsigned int uc_tx_partition_base)
{
unsigned int tx_buffer_count;
cdf_nbuf_t buffer_vaddr;
cdf_dma_addr_t buffer_paddr;
uint32_t *header_ptr;
uint32_t *ring_vaddr;
#define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 16
#define IPA_UC_TX_BUF_FRAG_HDR_OFFSET 32
ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
/* Allocate TX buffers as many as possible */
for (tx_buffer_count = 0;
tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
uc_tx_buf_sz, 0, 4, false);
if (!buffer_vaddr) {
cdf_print("%s: TX BUF alloc fail, loop index: %d",
__func__, tx_buffer_count);
return tx_buffer_count;
}
/* Init buffer */
cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
/* HTT control header */
*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
header_ptr++;
/* PKT ID */
*header_ptr |= ((uint16_t) uc_tx_partition_base +
tx_buffer_count) << 16;
cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
header_ptr++;
*header_ptr = (uint32_t) (buffer_paddr +
IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
header_ptr++;
*header_ptr = 0xFFFFFFFF;
/* FRAG Header */
header_ptr++;
*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
*ring_vaddr = buffer_paddr;
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
buffer_vaddr;
/* Memory barrier to ensure actual value updated */
ring_vaddr++;
}
return tx_buffer_count;
}
#else
int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
unsigned int uc_tx_buf_sz,
unsigned int uc_tx_buf_cnt,
unsigned int uc_tx_partition_base)
{
unsigned int tx_buffer_count;
cdf_nbuf_t buffer_vaddr;
uint32_t buffer_paddr;
uint32_t *header_ptr;
uint32_t *ring_vaddr;
#define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 20
#define IPA_UC_TX_BUF_FRAG_HDR_OFFSET 64
#define IPA_UC_TX_BUF_TSO_HDR_SIZE 6
ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
/* Allocate TX buffers as many as possible */
for (tx_buffer_count = 0;
tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
uc_tx_buf_sz, 0, 4, false);
if (!buffer_vaddr) {
cdf_print("%s: TX BUF alloc fail, loop index: %d",
__func__, tx_buffer_count);
return tx_buffer_count;
}
/* Init buffer */
cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
/* HTT control header */
*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
header_ptr++;
/* PKT ID */
*header_ptr |= ((uint16_t) uc_tx_partition_base +
tx_buffer_count) << 16;
cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
header_ptr++;
/* Frag Desc Pointer */
/* 64bits descriptor, Low 32bits */
*header_ptr = (uint32_t) (buffer_paddr +
IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
header_ptr++;
/* 64bits descriptor, high 32bits */
*header_ptr = 0;
header_ptr++;
/* chanreq, peerid */
*header_ptr = 0xFFFFFFFF;
header_ptr++;
/* FRAG Header */
/* 6 words TSO header */
header_ptr += IPA_UC_TX_BUF_TSO_HDR_SIZE;
*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
*ring_vaddr = buffer_paddr;
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
buffer_vaddr;
/* Memory barrier to ensure actual value updated */
ring_vaddr += 2;
}
return tx_buffer_count;
}
#endif
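
The two allocators above differ only in the per-buffer header they write and in how far the completion-ring pointer advances per buffer. The structs below are an illustrative view of those headers derived from the code; the driver writes raw words through header_ptr, and these names and types are assumptions.

#include <stdint.h>

/* WDI 1.0 (QCA_WIFI_2_0, Rome): 32-bit frag descriptor, frag desc at
 * buffer offset 16, frag header at offset 32; the completion ring
 * advances one 32-bit word per buffer. */
struct wdi1_tx_buf_header_sketch {
    uint32_t htt_tx_header;    /* HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT */
    uint32_t pkt_id;           /* (uc_tx_partition_base + index) << 16 */
    uint32_t frag_desc_paddr;  /* buffer_paddr + 16 */
    uint32_t reserved;         /* 0xFFFFFFFF */
    uint32_t frag_hdr_paddr;   /* buffer_paddr + 32 */
};

/* WDI 2.0 (other targets): 64-bit frag descriptor, chanreq/peerid word,
 * 6-word TSO header, frag desc at offset 20, frag header at offset 64;
 * the completion ring advances two words per buffer. */
struct wdi2_tx_buf_header_sketch {
    uint32_t htt_tx_header;
    uint32_t pkt_id;
    uint32_t frag_desc_paddr_lo;  /* buffer_paddr + 20 */
    uint32_t frag_desc_paddr_hi;  /* 0 */
    uint32_t chanreq_peerid;      /* 0xFFFFFFFF */
    uint32_t tso_header[6];
    uint32_t frag_hdr_paddr;      /* buffer_paddr + 64 */
};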
/**
* htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
* @pdev: htt context
@@ -848,11 +991,6 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
unsigned int uc_tx_buf_cnt,
unsigned int uc_tx_partition_base)
{
unsigned int tx_buffer_count;
cdf_nbuf_t buffer_vaddr;
cdf_dma_addr_t buffer_paddr;
uint32_t *header_ptr;
uint32_t *ring_vaddr;
int return_code = 0;
unsigned int tx_comp_ring_size;
@@ -900,61 +1038,9 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
/* Allocate TX buffers as many as possible */
for (tx_buffer_count = 0;
tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
uc_tx_buf_sz, 0, 4, false);
if (!buffer_vaddr) {
cdf_print("%s: TX BUF alloc fail, loop index: %d",
__func__, tx_buffer_count);
return 0;
}
pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = htt_tx_ipa_uc_wdi_tx_buf_alloc(
pdev, uc_tx_buf_sz, uc_tx_buf_cnt, uc_tx_partition_base);
/* Init buffer */
cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
/* HTT control header */
*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
header_ptr++;
/* PKT ID */
*header_ptr |= ((uint16_t) uc_tx_partition_base +
tx_buffer_count) << 16;
cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
header_ptr++;
/* Frag Desc Pointer */
/* 64bits descriptor, Low 32bits */
*header_ptr = (uint32_t) (buffer_paddr + 20);
header_ptr++;
/* 64bits descriptor, high 32bits */
*header_ptr = 0;
header_ptr++;
/* chanreq, peerid */
*header_ptr = 0xFFFFFFFF;
header_ptr++;
/* FRAG Header */
/* 6 words TSO header */
header_ptr += 6;
*header_ptr = buffer_paddr + 64;
*ring_vaddr = buffer_paddr;
pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
buffer_vaddr;
/* Memory barrier to ensure actual value updated */
ring_vaddr += 2;
}
pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
return 0;