qcacmn: Fix compilation issues in HTC/HIF

Resolve compilation issues caused by leftover references to the legacy cdf APIs after rebase, converting HTC/HIF over to their qdf equivalents

Change-Id: I332566deb690fa7de68d89bf51b42f94f7a270a6
CRs-Fixed: 981187
Author: Chouhan, Anurag
Date: 2016-03-03 19:05:05 +05:30
Committed by: Gerrit - the friendly Code Review server
Parent: 5776318d19
Commit: fc06aa9430
32 changed files with 965 additions and 944 deletions

@@ -36,8 +36,8 @@ extern "C" {
#include "athdefs.h"
#include "a_types.h"
#include "osapi_linux.h"
#include "cdf_status.h"
#include "cdf_nbuf.h"
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
@@ -259,8 +259,8 @@ void hif_detach_htc(struct hif_opaque_softc *scn);
/*
* API to handle HIF-specific BMI message exchanges, this API is synchronous
* and only allowed to be called from a context that can block (sleep) */
CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *scn,
cdf_dma_addr_t cmd, cdf_dma_addr_t rsp,
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *scn,
qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
uint8_t *pSendMessage, uint32_t Length,
uint8_t *pResponseMessage,
uint32_t *pResponseLength, uint32_t TimeoutMS);
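For reference, a minimal caller sketch of the renamed API (all identifiers here are hypothetical; the buffers are assumed to be DMA-mapped with their qdf_dma_addr_t addresses already obtained):

uint8_t bmi_req[64], bmi_rsp[256];
uint32_t rsp_len = sizeof(bmi_rsp);
qdf_dma_addr_t cmd_pa, rsp_pa;	/* DMA addresses of the mapped buffers */

/* Synchronous exchange; may sleep, so only call from a blockable context. */
QDF_STATUS status = hif_exchange_bmi_msg(scn, cmd_pa, rsp_pa,
					 bmi_req, sizeof(bmi_req),
					 bmi_rsp, &rsp_len,
					 1000 /* TimeoutMS */);
if (status != QDF_STATUS_SUCCESS)
	HIF_ERROR("%s: BMI exchange failed", __func__);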
@@ -275,9 +275,9 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *scn,
*
* hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
*/
CDF_STATUS hif_diag_read_access(struct hif_opaque_softc *scn, uint32_t address,
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *scn, uint32_t address,
uint32_t *data);
CDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *scn, uint32_t address,
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *scn, uint32_t address,
uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *scn, void *ramdump_base,
uint32_t address, uint32_t size);
@@ -292,9 +292,9 @@ void hif_dump_target_memory(struct hif_opaque_softc *scn, void *ramdump_base,
*
* hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
*/
CDF_STATUS hif_diag_write_access(struct hif_opaque_softc *scn, uint32_t address,
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *scn, uint32_t address,
uint32_t data);
CDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, uint32_t address,
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, uint32_t address,
uint8_t *data, int nbytes);
/*
@@ -379,9 +379,9 @@ void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int);
#ifdef IPA_OFFLOAD
void hif_ipa_get_ce_resource(struct hif_opaque_softc *scn,
cdf_dma_addr_t *ce_sr_base_paddr,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr);
qdf_dma_addr_t *ce_reg_paddr);
#else
/**
* hif_ipa_get_ce_resource() - get uc resource on hif
@@ -397,9 +397,9 @@ void hif_ipa_get_ce_resource(struct hif_opaque_softc *scn,
* Return: None
*/
static inline void hif_ipa_get_ce_resource(struct hif_opaque_softc *scn,
cdf_dma_addr_t *ce_sr_base_paddr,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr)
qdf_dma_addr_t *ce_reg_paddr)
{
return;
}
@@ -411,13 +411,13 @@ static inline void hif_ipa_get_ce_resource(struct hif_opaque_softc *scn,
struct hif_msg_callbacks {
void *Context;
/**< context meaningful to HTC */
CDF_STATUS (*txCompletionHandler)(void *Context, cdf_nbuf_t wbuf,
QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
uint32_t transferID,
uint32_t toeplitz_hash_result);
CDF_STATUS (*rxCompletionHandler)(void *Context, cdf_nbuf_t wbuf,
QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
uint8_t pipeID);
void (*txResourceAvailHandler)(void *context, uint8_t pipe);
void (*fwEventHandler)(void *context, CDF_STATUS status);
void (*fwEventHandler)(void *context, QDF_STATUS status);
};
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
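A sketch of wiring the callback struct above against the QDF-typed signatures; the handler name, context, and htc_handle are hypothetical:

static QDF_STATUS my_tx_done(void *context, qdf_nbuf_t wbuf,
			     uint32_t transfer_id,
			     uint32_t toeplitz_hash_result)
{
	qdf_nbuf_free(wbuf);	/* release the completed tx buffer */
	return QDF_STATUS_SUCCESS;
}

static struct hif_msg_callbacks cbs = {
	.Context = NULL,	/* caller's context pointer goes here */
	.txCompletionHandler = my_tx_done,
};

/* registered during init: */
hif_post_init(scn, htc_handle, &cbs);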
@@ -443,13 +443,13 @@ typedef struct device hif_bus_id;
void hif_post_init(struct hif_opaque_softc *scn, void *hHTC,
struct hif_msg_callbacks *callbacks);
CDF_STATUS hif_start(struct hif_opaque_softc *scn);
QDF_STATUS hif_start(struct hif_opaque_softc *scn);
void hif_stop(struct hif_opaque_softc *scn);
void hif_flush_surprise_remove(struct hif_opaque_softc *scn);
void hif_dump(struct hif_opaque_softc *scn, uint8_t CmdId, bool start);
CDF_STATUS hif_send_head(struct hif_opaque_softc *scn, uint8_t PipeID,
QDF_STATUS hif_send_head(struct hif_opaque_softc *scn, uint8_t PipeID,
uint32_t transferID, uint32_t nbytes,
cdf_nbuf_t wbuf, uint32_t data_attr);
qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *scn, uint8_t PipeID,
int force);
void hif_cancel_deferred_target_sleep(struct hif_opaque_softc *scn);
@@ -473,13 +473,13 @@ void hif_reset_soc(struct hif_opaque_softc *scn);
void hif_disable_aspm(struct hif_opaque_softc *);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
int htc_endpoint);
struct hif_opaque_softc *hif_open(cdf_device_t cdf_ctx, uint32_t mode,
enum ath_hal_bus_type bus_type,
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
enum qdf_bus_type bus_type,
struct hif_callbacks *cbk);
void hif_close(struct hif_opaque_softc *hif_ctx);
CDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
void *bdev, const hif_bus_id *bid,
enum ath_hal_bus_type bus_type,
enum qdf_bus_type bus_type,
enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
void hif_enable_power_gating(struct hif_opaque_softc *hif_ctx);
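Bring-up order with the renamed handle and bus-type enum, sketched assuming a PCI bus and caller-supplied qdf_ctx, dev, bdev, bid, and cbk (QDF_BUS_TYPE_PCI and HIF_ENABLE_TYPE_PROBE are the expected enumerators, hedged here):

struct hif_opaque_softc *hif_ctx;

hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, cbk);
if (!hif_ctx)
	return QDF_STATUS_E_FAILURE;

if (hif_enable(hif_ctx, dev, bdev, bid, QDF_BUS_TYPE_PCI,
	       HIF_ENABLE_TYPE_PROBE) != QDF_STATUS_SUCCESS) {
	hif_close(hif_ctx);
	return QDF_STATUS_E_FAILURE;
}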

@@ -99,7 +99,7 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
#ifdef FEATURE_NAPI_DEBUG
#define NAPI_DEBUG(fmt, ...) \
cdf_print("wlan: NAPI: %s:%d "fmt, __func__, __LINE__, ##__VA_ARGS__);
qdf_print("wlan: NAPI: %s:%d "fmt, __func__, __LINE__, ##__VA_ARGS__);
#else
#define NAPI_DEBUG(fmt, ...) /* NO-OP */
#endif /* FEATURE NAPI_DEBUG */
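With FEATURE_NAPI_DEBUG defined, a call site such as the following now routes through qdf_print instead of cdf_print:

NAPI_DEBUG("scheduling ce_id %d", ce_id);
/* expands to: qdf_print("wlan: NAPI: %s:%d scheduling ce_id %d",
 *			 __func__, __LINE__, ce_id); */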

@@ -66,7 +66,7 @@ static ssize_t ath_procfs_diag_read(struct file *file, char __user *buf,
int rv;
uint8_t *read_buffer = NULL;
read_buffer = cdf_mem_malloc(count);
read_buffer = qdf_mem_malloc(count);
if (NULL == read_buffer) {
HIF_ERROR("%s: cdf_mem_alloc failed", __func__);
return -ENOMEM;
@@ -86,12 +86,12 @@ static ssize_t ath_procfs_diag_read(struct file *file, char __user *buf,
}
if (copy_to_user(buf, read_buffer, count)) {
cdf_mem_free(read_buffer);
qdf_mem_free(read_buffer);
HIF_ERROR("%s: copy_to_user error in /proc/%s",
__func__, PROCFS_NAME);
return -EFAULT;
} else
cdf_mem_free(read_buffer);
qdf_mem_free(read_buffer);
if (rv == 0) {
return count;
@@ -108,13 +108,13 @@ static ssize_t ath_procfs_diag_write(struct file *file,
int rv;
uint8_t *write_buffer = NULL;
write_buffer = cdf_mem_malloc(count);
write_buffer = qdf_mem_malloc(count);
if (NULL == write_buffer) {
HIF_ERROR("%s: cdf_mem_alloc failed", __func__);
return -ENOMEM;
}
if (copy_from_user(write_buffer, buf, count)) {
cdf_mem_free(write_buffer);
qdf_mem_free(write_buffer);
HIF_ERROR("%s: copy_to_user error in /proc/%s",
__func__, PROCFS_NAME);
return -EFAULT;
@@ -134,7 +134,7 @@ static ssize_t ath_procfs_diag_write(struct file *file,
(uint8_t *)write_buffer, count);
}
cdf_mem_free(write_buffer);
qdf_mem_free(write_buffer);
if (rv == 0) {
return count;
} else {

@@ -73,7 +73,7 @@ struct CE_handle;
typedef void (*ce_send_cb)(struct CE_handle *copyeng,
void *per_ce_send_context,
void *per_transfer_send_context,
cdf_dma_addr_t buffer,
qdf_dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int sw_index,
@@ -88,7 +88,7 @@ typedef void (*ce_send_cb)(struct CE_handle *copyeng,
typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
void *per_CE_recv_context,
void *per_transfer_recv_context,
cdf_dma_addr_t buffer,
qdf_dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
@@ -142,7 +142,7 @@ struct CE_attr;
*/
int ce_send(struct CE_handle *copyeng,
void *per_transfer_send_context,
cdf_dma_addr_t buffer,
qdf_dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags,
@@ -177,7 +177,7 @@ void ce_sendlist_init(struct ce_sendlist *sendlist);
/* Append a simple buffer (address/length) to a sendlist. */
int ce_sendlist_buf_add(struct ce_sendlist *sendlist,
cdf_dma_addr_t buffer,
qdf_dma_addr_t buffer,
unsigned int nbytes,
uint32_t flags, /* OR-ed with internal flags */
uint32_t user_flags);
@@ -211,7 +211,7 @@ int ce_sendlist_send(struct CE_handle *copyeng,
*/
int ce_recv_buf_enqueue(struct CE_handle *copyeng,
void *per_transfer_recv_context,
cdf_dma_addr_t buffer);
qdf_dma_addr_t buffer);
/*
* Register a Receive Callback function.
@@ -314,7 +314,7 @@ void ce_enable_msi(struct hif_softc *scn,
int ce_completed_recv_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp);
@@ -331,7 +331,7 @@ int ce_completed_recv_next(struct CE_handle *copyeng,
int ce_completed_send_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *sw_idx,
@@ -350,22 +350,22 @@ struct CE_handle *ce_init(struct hif_softc *scn,
* receive buffers. Target DMA must be stopped before using
* this API.
*/
CDF_STATUS
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp);
qdf_dma_addr_t *bufferp);
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
* this API.
*/
CDF_STATUS
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
uint32_t *toeplitz_hash_result);
@@ -433,9 +433,9 @@ struct ce_sendlist {
#ifdef IPA_OFFLOAD
void ce_ipa_get_resource(struct CE_handle *ce,
cdf_dma_addr_t *ce_sr_base_paddr,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr);
qdf_dma_addr_t *ce_reg_paddr);
#else
/**
* ce_ipa_get_resource() - get uc resource on copyengine
@@ -453,9 +453,9 @@ void ce_ipa_get_resource(struct CE_handle *ce,
* Return: None
*/
static inline void ce_ipa_get_resource(struct CE_handle *ce,
cdf_dma_addr_t *ce_sr_base_paddr,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr)
qdf_dma_addr_t *ce_reg_paddr)
{
return;
}

@@ -30,9 +30,10 @@
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h> /* cdf_atomic_read */
#include "qdf_lock.h"
#include "qdf_status.h"
#include "qdf_status.h"
#include <qdf_atomic.h> /* qdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
@@ -42,7 +43,7 @@
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "cdf_trace.h"
#include "qdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#else
@@ -63,12 +64,12 @@ enum {
struct BMI_transaction {
struct HIF_CE_state *hif_state;
cdf_semaphore_t bmi_transaction_sem;
qdf_semaphore_t bmi_transaction_sem;
uint8_t *bmi_request_host; /* Req BMI msg in Host addr space */
cdf_dma_addr_t bmi_request_CE; /* Req BMI msg in CE addr space */
qdf_dma_addr_t bmi_request_CE; /* Req BMI msg in CE addr space */
uint32_t bmi_request_length; /* Length of BMI request */
uint8_t *bmi_response_host; /* Rsp BMI msg in Host addr space */
cdf_dma_addr_t bmi_response_CE; /* Rsp BMI msg in CE addr space */
qdf_dma_addr_t bmi_response_CE; /* Rsp BMI msg in CE addr space */
unsigned int bmi_response_length; /* Length of received response */
unsigned int bmi_timeout_ms;
uint32_t bmi_transaction_flags; /* flags for the transaction */
@@ -80,22 +81,20 @@ struct BMI_transaction {
* straight buffer, not an sk_buff.
*/
void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
void *transfer_context, qdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int sw_index,
unsigned int hw_index, uint32_t toeplitz_hash_result)
{
struct BMI_transaction *transaction =
(struct BMI_transaction *)transfer_context;
struct hif_softc *scn = HIF_GET_SOFTC(transaction->hif_state);
#ifdef BMI_RSP_POLLING
/*
* Fix EV118783, Release a semaphore after sending
* no matter whether a response is been expecting now.
*/
cdf_semaphore_release(scn->cdf_dev,
&transaction->bmi_transaction_sem);
qdf_semaphore_release(&transaction->bmi_transaction_sem);
#else
/*
* If a response is anticipated, we'll complete the
@@ -109,36 +108,33 @@ void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
* never assume resp comes later than this */
if (!transaction->bmi_response_CE ||
(transaction->bmi_transaction_flags & BMI_RESP_RECV_DONE)) {
cdf_semaphore_release(scn->cdf_dev,
&transaction->bmi_transaction_sem);
qdf_semaphore_release(&transaction->bmi_transaction_sem);
}
#endif
}
#ifndef BMI_RSP_POLLING
void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
void *transfer_context, qdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int flags)
{
struct BMI_transaction *transaction =
(struct BMI_transaction *)transfer_context;
struct hif_softc *scn = HIF_GET_SOFTC(transaction->hif_state);
transaction->bmi_response_length = nbytes;
transaction->bmi_transaction_flags |= BMI_RESP_RECV_DONE;
/* when both send/recv are done, the sem can be released */
if (transaction->bmi_transaction_flags & BMI_REQ_SEND_DONE) {
cdf_semaphore_release(scn->cdf_dev,
&transaction->bmi_transaction_sem);
qdf_semaphore_release(&transaction->bmi_transaction_sem);
}
}
#endif
CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
cdf_dma_addr_t bmi_cmd_da,
cdf_dma_addr_t bmi_rsp_da,
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
qdf_dma_addr_t bmi_cmd_da,
qdf_dma_addr_t bmi_rsp_da,
uint8_t *bmi_request,
uint32_t request_length,
uint8_t *bmi_response,
@@ -150,9 +146,9 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
struct HIF_CE_pipe_info *send_pipe_info =
&(hif_state->pipe_info[BMI_CE_NUM_TO_TARG]);
struct CE_handle *ce_send_hdl = send_pipe_info->ce_hdl;
cdf_dma_addr_t CE_request, CE_response = 0;
qdf_dma_addr_t CE_request, CE_response = 0;
struct BMI_transaction *transaction = NULL;
int status = CDF_STATUS_SUCCESS;
int status = QDF_STATUS_SUCCESS;
struct HIF_CE_pipe_info *recv_pipe_info =
&(hif_state->pipe_info[BMI_CE_NUM_TO_HOST]);
struct CE_handle *ce_recv = recv_pipe_info->ce_hdl;
@@ -160,16 +156,16 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
unsigned int transaction_id = 0xffff;
unsigned int user_flags = 0;
#ifdef BMI_RSP_POLLING
cdf_dma_addr_t buf;
qdf_dma_addr_t buf;
unsigned int completed_nbytes, id, flags;
int i;
#endif
transaction =
(struct BMI_transaction *)cdf_mem_malloc(sizeof(*transaction));
(struct BMI_transaction *)qdf_mem_malloc(sizeof(*transaction));
if (unlikely(!transaction)) {
HIF_ERROR("%s: no memory", __func__);
return CDF_STATUS_E_NOMEM;
return QDF_STATUS_E_NOMEM;
}
transaction_id = (mux_id & MUX_ID_MASK) |
(transaction_id & TRANSACTION_ID_MASK);
@@ -179,8 +175,8 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
A_TARGET_ACCESS_LIKELY(scn);
/* Initialize bmi_transaction_sem to block */
cdf_semaphore_init(&transaction->bmi_transaction_sem);
cdf_semaphore_acquire(scn->cdf_dev, &transaction->bmi_transaction_sem);
qdf_semaphore_init(&transaction->bmi_transaction_sem);
qdf_semaphore_acquire(&transaction->bmi_transaction_sem);
transaction->hif_state = hif_state;
transaction->bmi_request_host = bmi_request;
@@ -207,7 +203,7 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
transaction->bmi_response_CE = CE_response;
/* dma_cache_sync(dev, bmi_response,
BMI_DATASZ_MAX, DMA_FROM_DEVICE); */
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev,
qdf_mem_dma_sync_single_for_device(scn->qdf_dev,
CE_response,
BMI_DATASZ_MAX,
DMA_FROM_DEVICE);
@@ -220,14 +216,14 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
}
/* dma_cache_sync(dev, bmi_request, request_length, DMA_TO_DEVICE); */
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_request,
qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_request,
request_length, DMA_TO_DEVICE);
status =
ce_send(ce_send_hdl, transaction,
CE_request, request_length,
transaction_id, 0, user_flags);
ASSERT(status == CDF_STATUS_SUCCESS);
ASSERT(status == QDF_STATUS_SUCCESS);
/* NB: see hif_bmi_send_done */
/* TBDXXX: handle timeout */
@@ -235,8 +231,8 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
/* Wait for BMI request/response transaction to complete */
/* Always just wait for BMI request here if
* BMI_RSP_POLLING is defined */
while (cdf_semaphore_acquire
(scn->cdf_dev, &transaction->bmi_transaction_sem)) {
while (qdf_semaphore_acquire
(&transaction->bmi_transaction_sem)) {
/*need some break out condition(time out?) */
}
@@ -250,20 +246,20 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
while (ce_completed_recv_next(
ce_recv, NULL, NULL, &buf,
&completed_nbytes, &id,
&flags) != CDF_STATUS_SUCCESS) {
&flags) != QDF_STATUS_SUCCESS) {
if (i++ > BMI_RSP_TO_MILLISEC) {
HIF_ERROR("%s:error, can't get bmi response\n",
__func__);
status = CDF_STATUS_E_BUSY;
status = QDF_STATUS_E_BUSY;
break;
}
OS_DELAY(1000);
}
if ((status == CDF_STATUS_SUCCESS) && bmi_response_lengthp)
if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp)
*bmi_response_lengthp = completed_nbytes;
#else
if ((status == CDF_STATUS_SUCCESS) && bmi_response_lengthp) {
if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp) {
*bmi_response_lengthp =
transaction->bmi_response_length;
}
@@ -277,8 +273,8 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
transaction->bmi_request_CE,
request_length, BUS_DMA_TODEVICE); */
if (status != CDF_STATUS_SUCCESS) {
cdf_dma_addr_t unused_buffer;
if (status != QDF_STATUS_SUCCESS) {
qdf_dma_addr_t unused_buffer;
unsigned int unused_nbytes;
unsigned int unused_id;
unsigned int toeplitz_hash_result;
@@ -290,6 +286,6 @@ CDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
}
A_TARGET_ACCESS_UNLIKELY(scn);
cdf_mem_free(transaction);
qdf_mem_free(transaction);
return status;
}

@@ -28,17 +28,17 @@
#ifndef __CE_BMI_H__
#define __CE_BMI_H__
#include <cdf_atomic.h> /* cdf_atomic_read */
#include "cdf_lock.h"
#include <qdf_atomic.h> /* qdf_atomic_read */
#include "qdf_lock.h"
#include "ce_api.h"
#include "cepci.h"
void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
void *transfer_context, qdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int flags);
void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
void *transfer_context, cdf_dma_addr_t data,
void *transfer_context, qdf_dma_addr_t data,
unsigned int nbytes,
unsigned int transfer_id, unsigned int sw_index,
unsigned int hw_index, uint32_t toeplitz_hash_result);

@@ -30,9 +30,10 @@
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h> /* cdf_atomic_read */
#include "qdf_lock.h"
#include "qdf_status.h"
#include "qdf_status.h"
#include <qdf_atomic.h> /* qdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
@@ -42,7 +43,7 @@
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "cdf_trace.h"
#include "qdf_trace.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
@@ -62,7 +63,7 @@ hif_dump_target_memory(struct hif_opaque_softc *hif_ctx, void *ramdump_base,
A_TARGET_ACCESS_BEGIN(scn);
while (j < size) {
val = hif_read32_mb(scn->mem + loc + j);
cdf_mem_copy(temp, &val, 4);
qdf_mem_copy(temp, &val, 4);
j += 4;
temp += 4;
}
@@ -94,25 +95,25 @@ hif_dump_target_memory(struct hif_opaque_softc *hif_ctx, void *ramdump_base,
* at any moment.
*/
CDF_STATUS
QDF_STATUS
hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
uint8_t *data, int nbytes)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
CDF_STATUS status = CDF_STATUS_SUCCESS;
cdf_dma_addr_t buf;
QDF_STATUS status = QDF_STATUS_SUCCESS;
qdf_dma_addr_t buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
struct CE_handle *ce_diag;
cdf_dma_addr_t CE_data; /* Host buffer address in CE space */
cdf_dma_addr_t CE_data_base = 0;
qdf_dma_addr_t CE_data; /* Host buffer address in CE space */
qdf_dma_addr_t CE_data_base = 0;
void *data_buf = NULL;
int i;
unsigned int mux_id = 0;
unsigned int transaction_id = 0xffff;
cdf_dma_addr_t ce_phy_addr = address;
qdf_dma_addr_t ce_phy_addr = address;
unsigned int toeplitz_hash_result;
unsigned int user_flags = 0;
@@ -129,10 +130,10 @@ hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
if (address < DRAM_BASE_ADDRESS) {
if ((address & 0x3) || ((uintptr_t) data & 0x3))
return CDF_STATUS_E_INVAL;
return QDF_STATUS_E_INVAL;
while ((nbytes >= 4) &&
(CDF_STATUS_SUCCESS == (status =
(QDF_STATUS_SUCCESS == (status =
hif_diag_read_access(hif_ctx, address,
(uint32_t *)data)))) {
@@ -155,14 +156,14 @@ hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
data_buf = cdf_os_mem_alloc_consistent(scn->cdf_dev,
orig_nbytes, &CE_data_base, 0);
data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
orig_nbytes, &CE_data_base);
if (!data_buf) {
status = CDF_STATUS_E_NOMEM;
status = QDF_STATUS_E_NOMEM;
goto done;
}
cdf_mem_set(data_buf, orig_nbytes, 0);
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data_base,
qdf_mem_set(data_buf, orig_nbytes, 0);
qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data_base,
orig_nbytes, DMA_FROM_DEVICE);
remaining_bytes = orig_nbytes;
@@ -171,7 +172,7 @@ hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT);
{
status = ce_recv_buf_enqueue(ce_diag, NULL, CE_data);
if (status != CDF_STATUS_SUCCESS)
if (status != QDF_STATUS_SUCCESS)
goto done;
}
@@ -195,26 +196,26 @@ hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
status =
ce_send(ce_diag, NULL, ce_phy_addr, nbytes,
transaction_id, 0, user_flags);
if (status != CDF_STATUS_SUCCESS)
if (status != QDF_STATUS_SUCCESS)
goto done;
}
i = 0;
while (ce_completed_send_next(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id, NULL, NULL,
&toeplitz_hash_result) != CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
&toeplitz_hash_result) != QDF_STATUS_SUCCESS) {
qdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
status = QDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
if (buf != ce_phy_addr) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
@@ -222,19 +223,19 @@ hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
while (ce_completed_recv_next
(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id,
&flags) != CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
&flags) != QDF_STATUS_SUCCESS) {
qdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
status = QDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
if (buf != CE_data) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
@@ -246,20 +247,20 @@ hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
done:
A_TARGET_ACCESS_UNLIKELY(scn);
if (status == CDF_STATUS_SUCCESS)
cdf_mem_copy(data, data_buf, orig_nbytes);
if (status == QDF_STATUS_SUCCESS)
qdf_mem_copy(data, data_buf, orig_nbytes);
else
HIF_ERROR("%s failure (0x%x)", __func__, address);
if (data_buf)
cdf_os_mem_free_consistent(scn->cdf_dev, orig_nbytes,
data_buf, CE_data_base, 0);
qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
orig_nbytes, data_buf, CE_data_base, 0);
return status;
}
/* Read 4-byte aligned data from Target memory or register */
CDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
uint32_t address, uint32_t *data)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
@@ -273,28 +274,28 @@ CDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
*data = A_TARGET_READ(scn, address);
A_TARGET_ACCESS_END_RET(scn);
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
}
CDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
uint32_t address, uint8_t *data, int nbytes)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
CDF_STATUS status = CDF_STATUS_SUCCESS;
cdf_dma_addr_t buf;
QDF_STATUS status = QDF_STATUS_SUCCESS;
qdf_dma_addr_t buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
struct CE_handle *ce_diag;
void *data_buf = NULL;
cdf_dma_addr_t CE_data; /* Host buffer address in CE space */
cdf_dma_addr_t CE_data_base = 0;
qdf_dma_addr_t CE_data; /* Host buffer address in CE space */
qdf_dma_addr_t CE_data_base = 0;
int i;
unsigned int mux_id = 0;
unsigned int transaction_id = 0xffff;
cdf_dma_addr_t ce_phy_addr = address;
qdf_dma_addr_t ce_phy_addr = address;
unsigned int toeplitz_hash_result;
unsigned int user_flags = 0;
@@ -314,16 +315,16 @@ CDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
data_buf = cdf_os_mem_alloc_consistent(scn->cdf_dev,
orig_nbytes, &CE_data_base, 0);
data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
orig_nbytes, &CE_data_base);
if (!data_buf) {
status = A_NO_MEMORY;
goto done;
}
/* Copy caller's data to allocated DMA buf */
cdf_mem_copy(data_buf, data, orig_nbytes);
cdf_os_mem_dma_sync_single_for_device(scn->cdf_dev, CE_data_base,
qdf_mem_copy(data_buf, data, orig_nbytes);
qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data_base,
orig_nbytes, DMA_TO_DEVICE);
/*
@@ -348,7 +349,7 @@ CDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
{ /* Set up to receive directly into Target(!) address */
status = ce_recv_buf_enqueue(ce_diag,
NULL, ce_phy_addr);
if (status != CDF_STATUS_SUCCESS)
if (status != QDF_STATUS_SUCCESS)
goto done;
}
@@ -359,9 +360,9 @@ CDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
*/
status =
ce_send(ce_diag, NULL,
(cdf_dma_addr_t) CE_data, nbytes,
(qdf_dma_addr_t) CE_data, nbytes,
transaction_id, 0, user_flags);
if (status != CDF_STATUS_SUCCESS)
if (status != QDF_STATUS_SUCCESS)
goto done;
}
@@ -369,21 +370,21 @@ CDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
while (ce_completed_send_next(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id,
NULL, NULL, &toeplitz_hash_result) !=
CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
QDF_STATUS_SUCCESS) {
qdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
status = QDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
if (buf != CE_data) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
@@ -391,21 +392,21 @@ CDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
while (ce_completed_recv_next
(ce_diag, NULL, NULL, &buf,
&completed_nbytes, &id,
&flags) != CDF_STATUS_SUCCESS) {
cdf_mdelay(1);
&flags) != QDF_STATUS_SUCCESS) {
qdf_mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
status = CDF_STATUS_E_BUSY;
status = QDF_STATUS_E_BUSY;
goto done;
}
}
if (nbytes != completed_nbytes) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
if (buf != ce_phy_addr) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
@@ -418,11 +419,11 @@ done:
A_TARGET_ACCESS_UNLIKELY(scn);
if (data_buf) {
cdf_os_mem_free_consistent(scn->cdf_dev, orig_nbytes,
data_buf, CE_data_base, 0);
qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
orig_nbytes, data_buf, CE_data_base, 0);
}
if (status != CDF_STATUS_SUCCESS) {
if (status != QDF_STATUS_SUCCESS) {
HIF_ERROR("%s failure (0x%llu)", __func__,
(uint64_t)ce_phy_addr);
}
@@ -431,7 +432,7 @@ done:
}
/* Write 4B data to Target memory or register */
CDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
uint32_t address, uint32_t data)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
@@ -448,6 +449,6 @@ CDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
A_TARGET_WRITE(scn, address, data);
A_TARGET_ACCESS_END_RET(scn);
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
}

@@ -76,7 +76,7 @@ struct CE_ring_state {
/* Start of DMA-coherent area reserved for descriptors */
void *base_addr_owner_space_unaligned; /* Host address space */
cdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */
qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */
/*
* Actual start of descriptors.
@@ -84,7 +84,7 @@ struct CE_ring_state {
* Points into reserved DMA-coherent area, above.
*/
void *base_addr_owner_space; /* Host address space */
cdf_dma_addr_t base_addr_CE_space; /* CE address space */
qdf_dma_addr_t base_addr_CE_space; /* CE address space */
/*
* Start of shadow copy of descriptors, within regular memory.
* Aligned to descriptor-size boundary.
@@ -130,7 +130,7 @@ struct CE_state {
struct CE_ring_state *dest_ring;
atomic_t rx_pending;
cdf_spinlock_t ce_index_lock;
qdf_spinlock_t ce_index_lock;
bool force_break; /* Flag to indicate whether to
* break out the DPC context */
@@ -139,7 +139,7 @@ struct CE_state {
* DPC routine */
/* epping */
bool timer_inited;
cdf_softirq_timer_t poll_timer;
qdf_timer_t poll_timer;
void (*lro_flush_cb)(void *);
void *lro_data;
};
@@ -149,11 +149,11 @@ struct CE_state {
#ifdef QCA_WIFI_3_0
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
(cdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
((uint64_t)((desc)->buffer_addr_hi & 0x1F) << 32)))
#else
#define HIF_CE_DESC_ADDR_TO_DMA(desc) \
(cdf_dma_addr_t)((desc)->buffer_addr)
(qdf_dma_addr_t)((desc)->buffer_addr)
#endif
#ifdef QCA_WIFI_3_0

The diff for one file is not shown because it is too large.

@@ -28,8 +28,8 @@
#ifndef __CE_H__
#define __CE_H__
#include "cdf_atomic.h"
#include "cdf_lock.h"
#include "qdf_atomic.h"
#include "qdf_lock.h"
#include "hif_main.h"
#define CE_HTT_T2H_MSG 1
@@ -78,10 +78,10 @@ struct HIF_CE_pipe_info {
/* Instantaneous number of receive buffers that should be posted */
atomic_t recv_bufs_needed;
cdf_size_t buf_sz;
cdf_spinlock_t recv_bufs_needed_lock;
qdf_size_t buf_sz;
qdf_spinlock_t recv_bufs_needed_lock;
cdf_spinlock_t completion_freeq_lock;
qdf_spinlock_t completion_freeq_lock;
/* Limit the number of outstanding send requests. */
int num_sends_allowed;
@@ -111,11 +111,11 @@ struct HIF_CE_state {
struct hif_softc ol_sc;
bool started;
struct ce_tasklet_entry tasklets[CE_COUNT_MAX];
cdf_spinlock_t keep_awake_lock;
qdf_spinlock_t keep_awake_lock;
unsigned int keep_awake_count;
bool verified_awake;
bool fake_sleep;
cdf_softirq_timer_t sleep_timer;
qdf_timer_t sleep_timer;
bool sleep_timer_init;
unsigned long sleep_ticks;

@@ -35,7 +35,7 @@
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "cdf_lock.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "epping_main.h"
#include "hif_main.h"
@@ -97,7 +97,7 @@ struct hif_ce_desc_event {
/* max history to record per copy engine */
#define HIF_CE_HISTORY_MAX 512
cdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
@@ -113,11 +113,11 @@ struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
* trying to access the array, full locking of the recording process would
* be needed to have sane logging.
*/
static int get_next_record_index(cdf_atomic_t *table_index, int array_size)
static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
int record_index = cdf_atomic_inc_return(table_index);
int record_index = qdf_atomic_inc_return(table_index);
if (record_index == array_size)
cdf_atomic_sub(array_size, table_index);
qdf_atomic_sub(array_size, table_index);
while (record_index >= array_size)
record_index -= array_size;
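A worked example of the lock-free wraparound, assuming array_size is HIF_CE_HISTORY_MAX (512): if the shared counter held 511, qdf_atomic_inc_return() returns 512, the qdf_atomic_sub() rebases the counter to 0, and the while loop folds the local 512 back to slot 0; a racing caller that observed 513 before the subtraction landed is folded to slot 1 the same way. A hypothetical recording step:

int idx = get_next_record_index(&hif_ce_desc_history_index[ce_id],
				HIF_CE_HISTORY_MAX);
hif_ce_desc_history[ce_id][idx] = *event;	/* event: hypothetical input */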
@@ -149,8 +149,8 @@ void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
if (cbk && cbk->get_monotonic_boottime)
event->time = cbk->get_monotonic_boottime();
else
event->time = ((uint64_t)cdf_system_ticks_to_msecs(
cdf_system_ticks()) * 1000);
event->time = ((uint64_t)qdf_system_ticks_to_msecs(
qdf_system_ticks()) * 1000);
if (descriptor != NULL)
event->descriptor = *descriptor;
@@ -169,7 +169,7 @@ void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
*/
void ce_init_ce_desc_event_log(int ce_id, int size)
{
cdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
}
#else
void hif_record_ce_desc_event(struct hif_softc *scn,
@@ -232,7 +232,7 @@ int
ce_completed_send_next_nolock(struct CE_state *CE_state,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *sw_idx, unsigned int *hw_idx,
@@ -275,7 +275,7 @@ void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
int
ce_send_nolock(struct CE_handle *copyeng,
void *per_transfer_context,
cdf_dma_addr_t buffer,
qdf_dma_addr_t buffer,
uint32_t nbytes,
uint32_t transfer_id,
uint32_t flags,
@@ -295,7 +295,7 @@ ce_send_nolock(struct CE_handle *copyeng,
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
A_TARGET_ACCESS_END_RET(scn);
return status;
}
@@ -357,7 +357,7 @@ ce_send_nolock(struct CE_handle *copyeng,
src_ring->write_index);
src_ring->write_index = write_index;
status = CDF_STATUS_SUCCESS;
status = QDF_STATUS_SUCCESS;
}
A_TARGET_ACCESS_END_RET(scn);
@@ -367,7 +367,7 @@ ce_send_nolock(struct CE_handle *copyeng,
int
ce_send(struct CE_handle *copyeng,
void *per_transfer_context,
cdf_dma_addr_t buffer,
qdf_dma_addr_t buffer,
uint32_t nbytes,
uint32_t transfer_id,
uint32_t flags,
@@ -376,10 +376,10 @@ ce_send(struct CE_handle *copyeng,
struct CE_state *CE_state = (struct CE_state *)copyeng;
int status;
cdf_spin_lock_bh(&CE_state->ce_index_lock);
qdf_spin_lock_bh(&CE_state->ce_index_lock);
status = ce_send_nolock(copyeng, per_transfer_context, buffer, nbytes,
transfer_id, flags, user_flag);
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
return status;
}
@@ -397,7 +397,7 @@ void ce_sendlist_init(struct ce_sendlist *sendlist)
int
ce_sendlist_buf_add(struct ce_sendlist *sendlist,
cdf_dma_addr_t buffer,
qdf_dma_addr_t buffer,
uint32_t nbytes,
uint32_t flags,
uint32_t user_flags)
@@ -407,8 +407,8 @@ ce_sendlist_buf_add(struct ce_sendlist *sendlist,
struct ce_sendlist_item *item;
if (num_items >= CE_SENDLIST_ITEMS_MAX) {
CDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
return CDF_STATUS_E_RESOURCES;
QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
return QDF_STATUS_E_RESOURCES;
}
item = &sl->item[num_items];
@@ -418,7 +418,7 @@ ce_sendlist_buf_add(struct ce_sendlist *sendlist,
item->flags = flags;
item->user_flags = user_flags;
sl->num_items = num_items + 1;
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
int
@@ -435,9 +435,9 @@ ce_sendlist_send(struct CE_handle *copyeng,
unsigned int sw_index;
unsigned int write_index;
CDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
cdf_spin_lock_bh(&CE_state->ce_index_lock);
qdf_spin_lock_bh(&CE_state->ce_index_lock);
sw_index = src_ring->sw_index;
write_index = src_ring->write_index;
@@ -450,28 +450,28 @@ ce_sendlist_send(struct CE_handle *copyeng,
for (i = 0; i < num_items - 1; i++) {
item = &sl->item[i];
/* TBDXXX: Support extensible sendlist_types? */
CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
status = ce_send_nolock(copyeng, CE_SENDLIST_ITEM_CTXT,
(cdf_dma_addr_t) item->data,
(qdf_dma_addr_t) item->data,
item->u.nbytes, transfer_id,
item->flags | CE_SEND_FLAG_GATHER,
item->user_flags);
CDF_ASSERT(status == CDF_STATUS_SUCCESS);
QDF_ASSERT(status == QDF_STATUS_SUCCESS);
}
/* provide valid context pointer for final item */
item = &sl->item[i];
/* TBDXXX: Support extensible sendlist_types? */
CDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
status = ce_send_nolock(copyeng, per_transfer_context,
(cdf_dma_addr_t) item->data,
(qdf_dma_addr_t) item->data,
item->u.nbytes,
transfer_id, item->flags,
item->user_flags);
CDF_ASSERT(status == CDF_STATUS_SUCCESS);
QDF_ASSERT(status == QDF_STATUS_SUCCESS);
NBUF_UPDATE_TX_PKT_COUNT((cdf_nbuf_t)per_transfer_context,
NBUF_TX_PKT_CE);
DPTRACE(cdf_dp_trace((cdf_nbuf_t)per_transfer_context,
CDF_DP_TRACE_CE_PACKET_PTR_RECORD,
DPTRACE(qdf_dp_trace((cdf_nbuf_t)per_transfer_context,
QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
(uint8_t *)(((cdf_nbuf_t)per_transfer_context)->data),
sizeof(((cdf_nbuf_t)per_transfer_context)->data)));
} else {
@@ -482,7 +482,7 @@ ce_sendlist_send(struct CE_handle *copyeng,
* the entire request at once, punt it back to the caller.
*/
}
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
return status;
}
@@ -543,7 +543,7 @@ int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
uint64_t dma_addr;
uint32_t user_flags = 0;
cdf_spin_lock_bh(&ce_state->ce_index_lock);
qdf_spin_lock_bh(&ce_state->ce_index_lock);
sw_index = src_ring->sw_index;
write_index = src_ring->write_index;
@@ -606,7 +606,7 @@ int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
/*
* Clear packet offset for all but the first CE desc.
*/
user_flags &= ~CDF_CE_TX_PKT_OFFSET_BIT_M;
user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
shadow_src_desc->meta_data = transfer_id;
@@ -641,7 +641,7 @@ int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
}
}
cdf_spin_unlock_bh(&ce_state->ce_index_lock);
qdf_spin_unlock_bh(&ce_state->ce_index_lock);
/*
* If all packets in the array are transmitted,
@@ -655,7 +655,7 @@ int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
int
ce_recv_buf_enqueue(struct CE_handle *copyeng,
void *per_recv_context, cdf_dma_addr_t buffer)
void *per_recv_context, qdf_dma_addr_t buffer)
{
int status;
struct CE_state *CE_state = (struct CE_state *)copyeng;
@@ -668,13 +668,13 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
uint64_t dma_addr = buffer;
struct hif_softc *scn = CE_state->scn;
cdf_spin_lock_bh(&CE_state->ce_index_lock);
qdf_spin_lock_bh(&CE_state->ce_index_lock);
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
A_TARGET_ACCESS_BEGIN_RET_EXT(scn, val);
if (val == -1) {
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
return val;
}
@@ -704,17 +704,17 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
dest_ring->write_index = write_index;
status = CDF_STATUS_SUCCESS;
status = QDF_STATUS_SUCCESS;
} else {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
}
A_TARGET_ACCESS_END_RET_EXT(scn, val);
if (val == -1) {
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
return val;
}
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
return status;
}
@@ -755,10 +755,10 @@ unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
unsigned int sw_index;
unsigned int write_index;
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
sw_index = src_ring->sw_index;
write_index = src_ring->write_index;
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
@@ -771,10 +771,10 @@ unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
unsigned int sw_index;
unsigned int write_index;
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
sw_index = dest_ring->sw_index;
write_index = dest_ring->write_index;
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
@@ -804,9 +804,9 @@ unsigned int ce_send_entries_done(struct CE_handle *copyeng)
struct CE_state *CE_state = (struct CE_state *)copyeng;
unsigned int nentries;
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
nentries = ce_send_entries_done_nolock(CE_state->scn, CE_state);
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
return nentries;
}
@@ -836,9 +836,9 @@ unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
struct CE_state *CE_state = (struct CE_state *)copyeng;
unsigned int nentries;
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
nentries = ce_recv_entries_done_nolock(CE_state->scn, CE_state);
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
return nentries;
}
@@ -857,7 +857,7 @@ int
ce_completed_recv_next_nolock(struct CE_state *CE_state,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
@@ -886,7 +886,7 @@ ce_completed_recv_next_nolock(struct CE_state *CE_state,
* corresponding descriptor has completed. We treat this
* as a descriptor that is not yet done.
*/
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
goto done;
}
@@ -916,7 +916,7 @@ ce_completed_recv_next_nolock(struct CE_state *CE_state,
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
dest_ring->sw_index = sw_index;
status = CDF_STATUS_SUCCESS;
status = QDF_STATUS_SUCCESS;
done:
return status;
@@ -926,45 +926,45 @@ int
ce_completed_recv_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp, unsigned int *flagsp)
{
struct CE_state *CE_state = (struct CE_state *)copyeng;
int status;
cdf_spin_lock_bh(&CE_state->ce_index_lock);
qdf_spin_lock_bh(&CE_state->ce_index_lock);
status =
ce_completed_recv_next_nolock(CE_state, per_CE_contextp,
per_transfer_contextp, bufferp,
nbytesp, transfer_idp, flagsp);
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
return status;
}
/* NB: Modeled after ce_completed_recv_next_nolock */
CDF_STATUS
QDF_STATUS
ce_revoke_recv_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp, cdf_dma_addr_t *bufferp)
void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
struct CE_state *CE_state;
struct CE_ring_state *dest_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
CDF_STATUS status;
QDF_STATUS status;
struct hif_softc *scn;
CE_state = (struct CE_state *)copyeng;
dest_ring = CE_state->dest_ring;
if (!dest_ring) {
return CDF_STATUS_E_FAILURE;
return QDF_STATUS_E_FAILURE;
}
scn = CE_state->scn;
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
nentries_mask = dest_ring->nentries_mask;
sw_index = dest_ring->sw_index;
write_index = dest_ring->write_index;
@@ -992,11 +992,11 @@ ce_revoke_recv_next(struct CE_handle *copyeng,
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
dest_ring->sw_index = sw_index;
status = CDF_STATUS_SUCCESS;
status = QDF_STATUS_SUCCESS;
} else {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
}
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
return status;
}
@@ -1009,14 +1009,14 @@ int
ce_completed_send_next_nolock(struct CE_state *CE_state,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *sw_idx,
unsigned int *hw_idx,
uint32_t *toeplitz_hash_result)
{
int status = CDF_STATUS_E_FAILURE;
int status = QDF_STATUS_E_FAILURE;
struct CE_ring_state *src_ring = CE_state->src_ring;
uint32_t ctrl_addr = CE_state->ctrl_addr;
unsigned int nentries_mask = src_ring->nentries_mask;
@@ -1085,18 +1085,18 @@ ce_completed_send_next_nolock(struct CE_state *CE_state,
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
src_ring->sw_index = sw_index;
status = CDF_STATUS_SUCCESS;
status = QDF_STATUS_SUCCESS;
}
return status;
}
/* NB: Modeled after ce_completed_send_next */
CDF_STATUS
QDF_STATUS
ce_cancel_send_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
uint32_t *toeplitz_hash_result)
@@ -1106,17 +1106,17 @@ ce_cancel_send_next(struct CE_handle *copyeng,
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
CDF_STATUS status;
QDF_STATUS status;
struct hif_softc *scn;
CE_state = (struct CE_state *)copyeng;
src_ring = CE_state->src_ring;
if (!src_ring) {
return CDF_STATUS_E_FAILURE;
return QDF_STATUS_E_FAILURE;
}
scn = CE_state->scn;
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
nentries_mask = src_ring->nentries_mask;
sw_index = src_ring->sw_index;
write_index = src_ring->write_index;
@@ -1151,11 +1151,11 @@ ce_cancel_send_next(struct CE_handle *copyeng,
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
src_ring->sw_index = sw_index;
status = CDF_STATUS_SUCCESS;
status = QDF_STATUS_SUCCESS;
} else {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
}
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
return status;
}
@@ -1167,7 +1167,7 @@ int
ce_completed_send_next(struct CE_handle *copyeng,
void **per_CE_contextp,
void **per_transfer_contextp,
cdf_dma_addr_t *bufferp,
qdf_dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *sw_idx,
@@ -1177,13 +1177,13 @@ ce_completed_send_next(struct CE_handle *copyeng,
struct CE_state *CE_state = (struct CE_state *)copyeng;
int status;
cdf_spin_lock_bh(&CE_state->ce_index_lock);
qdf_spin_lock_bh(&CE_state->ce_index_lock);
status =
ce_completed_send_next_nolock(CE_state, per_CE_contextp,
per_transfer_contextp, bufferp,
nbytesp, transfer_idp, sw_idx,
hw_idx, toeplitz_hash_result);
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
return status;
}
@@ -1202,7 +1202,7 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
void *CE_context;
void *transfer_context;
cdf_dma_addr_t buf;
qdf_dma_addr_t buf;
unsigned int nbytes;
unsigned int id;
unsigned int sw_idx, hw_idx;
@@ -1226,7 +1226,7 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
* addressed by change spin_lock to spin_lock_bh also.
*/
cdf_spin_lock_bh(&CE_state->ce_index_lock);
qdf_spin_lock_bh(&CE_state->ce_index_lock);
if (CE_state->send_cb) {
{
@@ -1238,9 +1238,9 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
&transfer_context, &buf,
&nbytes, &id, &sw_idx, &hw_idx,
&toeplitz_hash_result) ==
CDF_STATUS_SUCCESS) {
QDF_STATUS_SUCCESS) {
if (ce_id != CE_HTT_H2T_MSG) {
cdf_spin_unlock_bh(
qdf_spin_unlock_bh(
&CE_state->ce_index_lock);
CE_state->send_cb(
(struct CE_handle *)
@@ -1248,24 +1248,24 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
transfer_context, buf,
nbytes, id, sw_idx, hw_idx,
toeplitz_hash_result);
cdf_spin_lock_bh(
qdf_spin_lock_bh(
&CE_state->ce_index_lock);
} else {
struct HIF_CE_pipe_info *pipe_info =
(struct HIF_CE_pipe_info *)
CE_context;
cdf_spin_lock_bh(&pipe_info->
qdf_spin_lock_bh(&pipe_info->
completion_freeq_lock);
pipe_info->num_sends_allowed++;
cdf_spin_unlock_bh(&pipe_info->
qdf_spin_unlock_bh(&pipe_info->
completion_freeq_lock);
}
}
}
}
cdf_spin_unlock_bh(&CE_state->ce_index_lock);
qdf_spin_unlock_bh(&CE_state->ce_index_lock);
hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
NULL, NULL, 0);
@@ -1299,7 +1299,7 @@ int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
uint32_t ctrl_addr = CE_state->ctrl_addr;
void *CE_context;
void *transfer_context;
cdf_dma_addr_t buf;
qdf_dma_addr_t buf;
unsigned int nbytes;
unsigned int id;
unsigned int flags;
@@ -1315,7 +1315,7 @@ int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
return 0; /* no work done */
}
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
/* Clear force_break flag and re-initialize receive_count to 0 */
@@ -1331,8 +1331,8 @@ more_completions:
while (ce_completed_recv_next_nolock
(CE_state, &CE_context, &transfer_context,
&buf, &nbytes, &id, &flags) ==
CDF_STATUS_SUCCESS) {
cdf_spin_unlock(&CE_state->ce_index_lock);
QDF_STATUS_SUCCESS) {
qdf_spin_unlock(&CE_state->ce_index_lock);
CE_state->recv_cb((struct CE_handle *)CE_state,
CE_context, transfer_context, buf,
nbytes, id, flags);
@@ -1354,8 +1354,8 @@ more_completions:
/* Break the receive processes by
* force if force_break set up
*/
if (cdf_unlikely(CE_state->force_break)) {
cdf_atomic_set(&CE_state->rx_pending, 1);
if (qdf_unlikely(CE_state->force_break)) {
qdf_atomic_set(&CE_state->rx_pending, 1);
CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
if (Q_TARGET_ACCESS_END(scn) < 0)
@@ -1363,7 +1363,7 @@ more_completions:
CE_state->receive_count);
return CE_state->receive_count;
}
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
}
}
@@ -1383,24 +1383,24 @@ more_completions:
(CE_state, &CE_context,
&transfer_context, &buf, &nbytes,
&id, &sw_idx, &hw_idx,
&toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
&toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
if (CE_id != CE_HTT_H2T_MSG ||
WLAN_IS_EPPING_ENABLED(mode)) {
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
CE_state->send_cb((struct CE_handle *)CE_state,
CE_context, transfer_context,
buf, nbytes, id, sw_idx,
hw_idx, toeplitz_hash_result);
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
} else {
struct HIF_CE_pipe_info *pipe_info =
(struct HIF_CE_pipe_info *)CE_context;
cdf_spin_lock(&pipe_info->
qdf_spin_lock(&pipe_info->
completion_freeq_lock);
pipe_info->num_sends_allowed++;
cdf_spin_unlock(&pipe_info->
qdf_spin_unlock(&pipe_info->
completion_freeq_lock);
}
}
@@ -1409,13 +1409,13 @@ more_completions:
(CE_state, &CE_context,
&transfer_context, &buf, &nbytes,
&id, &sw_idx, &hw_idx,
&toeplitz_hash_result) == CDF_STATUS_SUCCESS) {
cdf_spin_unlock(&CE_state->ce_index_lock);
&toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
qdf_spin_unlock(&CE_state->ce_index_lock);
CE_state->send_cb((struct CE_handle *)CE_state,
CE_context, transfer_context, buf,
nbytes, id, sw_idx, hw_idx,
toeplitz_hash_result);
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
}
#endif /*ATH_11AC_TXCOMPACT */
}
@@ -1425,7 +1425,7 @@ more_watermarks:
CE_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
if (CE_int_status & CE_WATERMARK_MASK) {
if (CE_state->watermark_cb) {
cdf_spin_unlock(&CE_state->ce_index_lock);
qdf_spin_unlock(&CE_state->ce_index_lock);
/* Convert HW IS bits to software flags */
flags =
(CE_int_status & CE_WATERMARK_MASK) >>
@@ -1434,7 +1434,7 @@ more_watermarks:
CE_state->
watermark_cb((struct CE_handle *)CE_state,
CE_state->wm_context, flags);
cdf_spin_lock(&CE_state->ce_index_lock);
qdf_spin_lock(&CE_state->ce_index_lock);
}
}
}
@@ -1494,8 +1494,8 @@ more_watermarks:
}
}
cdf_spin_unlock(&CE_state->ce_index_lock);
cdf_atomic_set(&CE_state->rx_pending, 0);
qdf_spin_unlock(&CE_state->ce_index_lock);
qdf_atomic_set(&CE_state->rx_pending, 0);
if (Q_TARGET_ACCESS_END(scn) < 0)
HIF_ERROR("<--[premature rc=%d]\n", CE_state->receive_count);
@@ -1514,11 +1514,11 @@ void ce_per_engine_service_any(int irq, struct hif_softc *scn)
uint32_t intr_summary;
A_TARGET_ACCESS_BEGIN(scn);
if (!cdf_atomic_read(&scn->tasklet_from_intr)) {
if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
if (cdf_atomic_read(&CE_state->rx_pending)) {
cdf_atomic_set(&CE_state->rx_pending, 0);
if (qdf_atomic_read(&CE_state->rx_pending)) {
qdf_atomic_set(&CE_state->rx_pending, 0);
ce_per_engine_service(scn, CE_id);
}
}
@@ -1718,11 +1718,11 @@ void ce_pkt_dl_len_set(void *hif_sc, u_int32_t pkt_download_len)
struct hif_softc *sc = (struct hif_softc *)(hif_sc);
struct CE_state *ce_state = sc->ce_id_to_state[CE_HTT_H2T_MSG];
cdf_assert_always(ce_state);
qdf_assert_always(ce_state);
ce_state->download_len = pkt_download_len;
cdf_print("%s CE %d Pkt download length %d", __func__,
qdf_print("%s CE %d Pkt download length %d", __func__,
ce_state->id, ce_state->download_len);
}
#else
@@ -1737,7 +1737,7 @@ bool ce_get_rx_pending(struct hif_softc *scn)
for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
if (cdf_atomic_read(&CE_state->rx_pending))
if (qdf_atomic_read(&CE_state->rx_pending))
return true;
}
@@ -1754,7 +1754,7 @@ bool ce_get_rx_pending(struct hif_softc *scn)
bool ce_check_rx_pending(struct hif_softc *scn, int ce_id)
{
struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
if (cdf_atomic_read(&CE_state->rx_pending))
if (qdf_atomic_read(&CE_state->rx_pending))
return true;
else
return false;
@@ -1813,14 +1813,14 @@ void ce_enable_msi(struct hif_softc *scn, unsigned int CE_id,
* Return: None
*/
void ce_ipa_get_resource(struct CE_handle *ce,
cdf_dma_addr_t *ce_sr_base_paddr,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr)
qdf_dma_addr_t *ce_reg_paddr)
{
struct CE_state *CE_state = (struct CE_state *)ce;
uint32_t ring_loop;
struct CE_src_desc *ce_desc;
cdf_dma_addr_t phy_mem_base;
qdf_dma_addr_t phy_mem_base;
struct hif_softc *scn = CE_state->scn;
if (CE_RUNNING != CE_state->state) {

@@ -32,9 +32,9 @@
#include <linux/if_arp.h>
#include "a_types.h"
#include "athdefs.h"
#include "cdf_lock.h"
#include "cdf_types.h"
#include "cdf_status.h"
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
@@ -199,10 +199,10 @@ static void ce_tasklet(unsigned long data)
hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
HIF_CE_TASKLET_ENTRY, NULL, NULL, 0);
if (cdf_atomic_read(&scn->link_suspended)) {
if (qdf_atomic_read(&scn->link_suspended)) {
HIF_ERROR("%s: ce %d tasklet fired after link suspend.",
__func__, tasklet_entry->ce_id);
CDF_BUG(0);
QDF_BUG(0);
}
ce_per_engine_service(scn, tasklet_entry->ce_id);
@@ -229,7 +229,7 @@ static void ce_tasklet(unsigned long data)
hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
NULL, NULL, 0);
cdf_atomic_dec(&scn->active_tasklet_cnt);
qdf_atomic_dec(&scn->active_tasklet_cnt);
}
/**
@@ -270,7 +270,7 @@ void ce_tasklet_kill(struct hif_softc *scn)
tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
hif_ce_state->tasklets[i].inited = false;
}
cdf_atomic_set(&scn->active_tasklet_cnt, 0);
qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}
/**
* ce_irq_handler() - ce_irq_handler
@@ -303,7 +303,7 @@ static irqreturn_t ce_irq_handler(int irq, void *context)
#endif
ce_irq_disable(scn, ce_id);
ce_irq_status(scn, ce_id, &host_status);
cdf_atomic_inc(&scn->active_tasklet_cnt);
qdf_atomic_inc(&scn->active_tasklet_cnt);
hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
if (hif_napi_enabled(hif_hdl, ce_id))
hif_napi_schedule(hif_hdl, ce_id);
@@ -340,16 +340,16 @@ const char *ce_name[ICNSS_MAX_IRQ_REGISTRATIONS] = {
* Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
* unregister for copy engine x.
*
* Return: CDF_STATUS
* Return: QDF_STATUS
*/
CDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
int id;
int ret;
if (hif_ce_state == NULL) {
HIF_WARN("%s: hif_ce_state = NULL", __func__);
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
for (id = 0; id < CE_COUNT_MAX; id++) {
if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
@@ -361,7 +361,7 @@ CDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
__func__, id, ret);
}
}
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
/**
* ce_register_irq() - ce_register_irq
@@ -371,9 +371,9 @@ CDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
* Registers copy engine irqs matching mask. If a 1 is set at bit x,
* register for copy engine x.
*
* Return: CDF_STATUS
* Return: QDF_STATUS
*/
CDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
int id;
int ret;
@@ -390,7 +390,7 @@ CDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
"%s: cannot register CE %d irq handler, ret = %d",
__func__, id, ret);
ce_unregister_irq(hif_ce_state, done_mask);
return CDF_STATUS_E_FAULT;
return QDF_STATUS_E_FAULT;
} else {
done_mask |= 1 << id;
}
@@ -402,5 +402,5 @@ CDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
ce_enable_irq_in_group_reg(HIF_GET_SOFTC(hif_ce_state), done_mask);
#endif
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
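Both IRQ helpers take a bit-per-engine mask (bit x selects copy engine x), and ce_register_irq unwinds any partially registered engines through done_mask before failing. A hypothetical caller, assuming the mask convention documented above:

uint32_t mask = (1 << 1) | (1 << 2);	/* copy engines 1 and 2 only */

if (ce_register_irq(hif_ce_state, mask) != QDF_STATUS_SUCCESS)
	HIF_ERROR("%s: CE irq registration failed", __func__);

/* ... on teardown, pass the same mask ... */
ce_unregister_irq(hif_ce_state, mask);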


@@ -31,6 +31,6 @@
void init_tasklet_workers(struct hif_opaque_softc *scn);
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask);
void ce_tasklet_kill(struct hif_softc *scn);
CDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask);
CDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask);
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask);
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask);
#endif /* __CE_TASKLET_H__ */


@@ -21,22 +21,22 @@
#ifndef __HIF_DEBUG_H__
#define __HIF_DEBUG_H__
#include "cdf_trace.h"
#include "qdf_trace.h"
#define HIF_ERROR(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_ERROR, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, ## args)
#define HIF_WARN(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_WARN, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_WARN, ## args)
#define HIF_INFO(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, ## args)
#define HIF_INFO_HI(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO_HIGH, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH, ## args)
#define HIF_INFO_MED(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO_MED, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_MED, ## args)
#define HIF_INFO_LO(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_INFO_LOW, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_LOW, ## args)
#define HIF_TRACE(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_ERROR, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR, ## args)
#define HIF_DBG(args ...) \
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_DEBUG, ## args)
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, ## args)
#endif /* __HIF_DEBUG_H__ */
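After the rename, every HIF_* macro expands to QDF_TRACE with QDF_MODULE_ID_HIF and a fixed trace level, so call sites keep passing only a printf-style format string. Typical usage, as seen throughout the converted files:

HIF_ERROR("%s: hif_enable_bus error = %d", __func__, status);
HIF_INFO("%s: CE %d ready", __func__, ce_id);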


@@ -30,9 +30,10 @@
#include "athdefs.h"
#include "osapi_linux.h"
#include "targcfg.h"
#include "cdf_lock.h"
#include "cdf_status.h"
#include <cdf_atomic.h> /* cdf_atomic_read */
#include "qdf_lock.h"
#include "qdf_status.h"
#include "qdf_status.h"
#include <qdf_atomic.h> /* qdf_atomic_read */
#include <targaddrs.h>
#include <bmi_msg.h>
#include "hif_io32.h"
@@ -45,8 +46,8 @@
#include "hif_hw_version.h"
#include "ce_api.h"
#include "ce_tasklet.h"
#include "cdf_trace.h"
#include "cdf_status.h"
#include "qdf_trace.h"
#include "qdf_status.h"
#ifdef CONFIG_CNSS
#include <net/cnss.h>
#endif
@@ -193,7 +194,7 @@ static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
return;
msg_callbacks->fwEventHandler(msg_callbacks->Context,
CDF_STATUS_E_FAILURE);
QDF_STATUS_E_FAILURE);
}
/**
@@ -279,7 +280,7 @@ void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
CDF_BUG(scn);
QDF_BUG(scn);
scn->linkstate_vote--;
if (scn->linkstate_vote == 0)
@@ -300,7 +301,7 @@ void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
CDF_BUG(scn);
QDF_BUG(scn);
scn->linkstate_vote++;
if (scn->linkstate_vote == 1)
@@ -322,7 +323,7 @@ void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
CDF_BUG(scn);
QDF_BUG(scn);
return scn->linkstate_vote == 0;
}
@@ -381,14 +382,14 @@ bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
/**
* init_buffer_count() - initial buffer count
* @maxSize: cdf_size_t
* @maxSize: qdf_size_t
*
* routine to modify the initial buffer count to be allocated on an os
* platform basis. Platform owner will need to modify this as needed
*
* Return: cdf_size_t
* Return: qdf_size_t
*/
cdf_size_t init_buffer_count(cdf_size_t maxSize)
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
return maxSize;
}
@@ -453,7 +454,7 @@ void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
/**
* hif_open(): hif_open
* @cdf_ctx: CDF Context
* @qdf_ctx: QDF Context
* @mode: Driver Mode
* @bus_type: Bus Type
* @cbk: CDS Callbacks
@@ -462,35 +463,35 @@ void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
*
* Return: HIF Opaque Pointer
*/
struct hif_opaque_softc *hif_open(cdf_device_t cdf_ctx, uint32_t mode,
enum ath_hal_bus_type bus_type,
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
enum qdf_bus_type bus_type,
struct hif_callbacks *cbk)
{
struct hif_softc *scn;
CDF_STATUS status = CDF_STATUS_SUCCESS;
QDF_STATUS status = QDF_STATUS_SUCCESS;
int bus_context_size = hif_bus_get_context_size();
scn = (struct hif_softc *)cdf_mem_malloc(bus_context_size);
scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
if (!scn) {
HIF_ERROR("%s: cannot alloc memory for HIF context of size:%d",
__func__, bus_context_size);
return GET_HIF_OPAQUE_HDL(scn);
}
cdf_mem_zero(scn, bus_context_size);
qdf_mem_zero(scn, bus_context_size);
scn->cdf_dev = cdf_ctx;
scn->qdf_dev = qdf_ctx;
scn->hif_con_param = mode;
cdf_atomic_init(&scn->active_tasklet_cnt);
cdf_atomic_init(&scn->link_suspended);
cdf_atomic_init(&scn->tasklet_from_intr);
cdf_mem_copy(&scn->callbacks, cbk, sizeof(struct hif_callbacks));
qdf_atomic_init(&scn->active_tasklet_cnt);
qdf_atomic_init(&scn->link_suspended);
qdf_atomic_init(&scn->tasklet_from_intr);
qdf_mem_copy(&scn->callbacks, cbk, sizeof(struct hif_callbacks));
status = hif_bus_open(scn, bus_type);
if (status != CDF_STATUS_SUCCESS) {
if (status != QDF_STATUS_SUCCESS) {
HIF_ERROR("%s: hif_bus_open error = %d, bus_type = %d",
__func__, status, bus_type);
cdf_mem_free(scn);
qdf_mem_free(scn);
scn = NULL;
}
@@ -518,7 +519,7 @@ void hif_close(struct hif_opaque_softc *hif_ctx)
}
hif_bus_close(scn);
cdf_mem_free(scn);
qdf_mem_free(scn);
}
/**
@@ -530,23 +531,23 @@ void hif_close(struct hif_opaque_softc *hif_ctx)
* @bus_type: bus type
* @type: enable type
*
* Return: CDF_STATUS
* Return: QDF_STATUS
*/
CDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
void *bdev, const hif_bus_id *bid,
enum ath_hal_bus_type bus_type,
enum qdf_bus_type bus_type,
enum hif_enable_type type)
{
CDF_STATUS status;
QDF_STATUS status;
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
if (scn == NULL) {
HIF_ERROR("%s: hif_ctx = NULL", __func__);
return CDF_STATUS_E_NULL_VALUE;
return QDF_STATUS_E_NULL_VALUE;
}
status = hif_enable_bus(scn, dev, bdev, bid, type);
if (status != CDF_STATUS_SUCCESS) {
if (status != QDF_STATUS_SUCCESS) {
HIF_ERROR("%s: hif_enable_bus error = %d",
__func__, status);
return status;
@@ -558,7 +559,7 @@ CDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
if (hif_config_ce(scn)) {
HIF_ERROR("%s: Target probe failed.", __func__);
hif_disable_bus(scn);
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
return status;
}
/*
@@ -574,7 +575,7 @@ CDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
if (status < 0) {
HIF_ERROR("%s: ERROR - configure_IRQ_and_CE failed, status = %d",
__func__, status);
return CDF_STATUS_E_FAILURE;
return QDF_STATUS_E_FAILURE;
}
#endif
@@ -582,7 +583,7 @@ CDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
HIF_TRACE("%s: X OK", __func__);
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
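Together these hunks convert the bring-up path: hif_open allocates and zeroes a bus-sized softc and opens the bus, then hif_enable powers the bus and configures the copy engines. A hypothetical probe sequence under the converted signatures; the callback struct, device pointers, and bus id come from the caller's bus glue, and QDF_BUS_TYPE_PCI / HIF_ENABLE_TYPE_PROBE are assumed enumerator names:

struct hif_opaque_softc *hif;

hif = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk);
if (!hif)
	return QDF_STATUS_E_NOMEM;

if (hif_enable(hif, dev, bdev, bid, QDF_BUS_TYPE_PCI,
	       HIF_ENABLE_TYPE_PROBE) != QDF_STATUS_SUCCESS) {
	hif_close(hif);			/* hif_close frees the softc */
	return QDF_STATUS_E_FAILURE;
}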
/**
@@ -599,7 +600,7 @@ void hif_wlan_disable(struct hif_softc *scn)
enum icnss_driver_mode mode;
uint32_t con_mode = hif_get_conparam(scn);
if (CDF_GLOBAL_FTM_MODE == con_mode)
if (QDF_GLOBAL_FTM_MODE == con_mode)
mode = ICNSS_FTM;
else if (WLAN_IS_EPPING_ENABLED(con_mode))
mode = ICNSS_EPPING;
@@ -725,7 +726,7 @@ int hif_check_fw_reg(struct hif_opaque_softc *scn)
*
* Return: n/a
*/
void hif_read_phy_mem_base(struct hif_softc *scn, cdf_dma_addr_t *phy_mem_base)
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
*phy_mem_base = scn->mem_pa;
}
@@ -901,7 +902,7 @@ void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
cdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}
/**


@@ -42,8 +42,8 @@
#ifndef __HIF_MAIN_H__
#define __HIF_MAIN_H__
#include <cdf_atomic.h> /* cdf_atomic_read */
#include "cdf_lock.h"
#include <qdf_atomic.h> /* qdf_atomic_read */
#include "qdf_lock.h"
#include "cepci.h"
#include "hif.h"
@@ -113,9 +113,9 @@ struct hif_softc {
struct hif_config_info hif_config;
struct hif_target_info target_info;
void __iomem *mem;
enum ath_hal_bus_type bus_type;
enum qdf_bus_type bus_type;
void *ce_id_to_state[CE_COUNT_MAX];
cdf_device_t cdf_dev;
qdf_device_t qdf_dev;
bool hif_init_done;
bool request_irq_done;
/* Packet statistics */
@@ -139,7 +139,7 @@ struct hif_softc {
int fastpath_mode_on;
atomic_t tasklet_from_intr;
int htc_endpoint;
cdf_dma_addr_t mem_pa;
qdf_dma_addr_t mem_pa;
bool athdiag_procfs_inited;
#ifdef FEATURE_NAPI
struct qca_napi_data napi_data;
@@ -159,7 +159,7 @@ void athdiag_procfs_remove(void);
/* routine to modify the initial buffer count to be allocated on an os
* platform basis. Platform owner will need to modify this as needed
*/
cdf_size_t init_buffer_count(cdf_size_t maxSize);
qdf_size_t init_buffer_count(qdf_size_t maxSize);
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
int hif_get_target_type(struct hif_softc *ol_sc, struct device *dev,
@@ -172,14 +172,14 @@ int hif_get_device_type(uint32_t device_id,
bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem);
void hif_nointrs(struct hif_softc *scn);
void hif_bus_close(struct hif_softc *ol_sc);
CDF_STATUS hif_bus_open(struct hif_softc *ol_sc,
enum ath_hal_bus_type bus_type);
CDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev,
QDF_STATUS hif_bus_open(struct hif_softc *ol_sc,
enum qdf_bus_type bus_type);
QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev,
void *bdev, const hif_bus_id *bid, enum hif_enable_type type);
void hif_disable_bus(struct hif_softc *scn);
void hif_bus_prevent_linkdown(struct hif_softc *scn, bool flag);
int hif_bus_get_context_size(void);
void hif_read_phy_mem_base(struct hif_softc *scn, cdf_dma_addr_t *bar_value);
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *bar_value);
uint32_t hif_get_conparam(struct hif_softc *scn);
struct hif_callbacks *hif_get_callbacks_handle(struct hif_softc *scn);
bool hif_is_driver_unloading(struct hif_softc *scn);


@@ -424,7 +424,7 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
napi_info->stats[cpu].napi_polls++;
if (unlikely(NULL == hif))
CDF_ASSERT(hif != NULL); /* emit a warning if hif NULL */
QDF_ASSERT(hif != NULL); /* emit a warning if hif NULL */
else {
rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
HIF_INFO_HI("%s: ce_per_engine_service processed %d msgs",
@@ -461,7 +461,7 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
hif_napi_enable_irq(hif_ctx, napi_info->id);
/* support suspend/resume */
cdf_atomic_dec(&(hif->active_tasklet_cnt));
qdf_atomic_dec(&(hif->active_tasklet_cnt));
}
NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",


@@ -32,7 +32,7 @@
#include <hif.h>
#include "regtable.h"
#include "hif_debug.h"
#include "cdf_status.h"
#include "qdf_status.h"
#include "qwlan_version.h"
#include <net/cnss.h>
@@ -294,7 +294,7 @@ void icnss_dispatch_ce_irq(struct hif_softc *scn)
if (intr_summary == 0) {
if ((scn->target_status != OL_TRGET_STATUS_RESET) &&
(!cdf_atomic_read(&scn->link_suspended))) {
(!qdf_atomic_read(&scn->link_suspended))) {
hif_write32_mb(scn->mem +
(SOC_CORE_BASE_ADDRESS |


@@ -77,7 +77,7 @@ struct priv_ctrl_ctx {
static struct priv_ctrl_ctx g_priv_dump_ctx;
static INLINE void set_target_reg_bits(void __iomem *mem, uint32_t reg,
static inline void set_target_reg_bits(void __iomem *mem, uint32_t reg,
uint32_t bitmask, uint32_t val)
{
uint32_t value = hif_read32_mb(mem + (reg));
@@ -90,7 +90,7 @@ static INLINE void set_target_reg_bits(void __iomem *mem, uint32_t reg,
hif_write32_mb(mem + (reg), value);
}
static INLINE uint32_t get_target_reg_bits(void __iomem *mem,
static inline uint32_t get_target_reg_bits(void __iomem *mem,
uint32_t reg, uint32_t bitmask)
{
uint32_t value = hif_read32_mb(mem + (reg));
@@ -177,9 +177,9 @@ void priv_dump_chaninfo(struct hif_softc *scn)
hif_read32_mb(scn->mem +
BB_chn_tables_intf_data) &
0x0000ffff;
cdf_print("0x%x\t", val);
qdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
qdf_print("\n");
}
} else {
len = (bw == 2) ? 59 : 60;
@@ -187,10 +187,10 @@ void priv_dump_chaninfo(struct hif_softc *scn)
tmp =
hif_read32_mb(scn->mem +
BB_chn_tables_intf_data);
cdf_print("0x%x\t", ((tmp >> 16) & 0x0000ffff));
cdf_print("0x%x\t", (tmp & 0x0000ffff));
qdf_print("0x%x\t", ((tmp >> 16) & 0x0000ffff));
qdf_print("0x%x\t", (tmp & 0x0000ffff));
if (i % 2 == 0)
cdf_print("\n");
qdf_print("\n");
}
if (bw > 2) {
/* bw == 3 for vht80 */
@@ -202,11 +202,11 @@ void priv_dump_chaninfo(struct hif_softc *scn)
tmp =
hif_read32_mb(scn->mem +
BB_chn_tables_intf_data);
cdf_print("0x%x\t",
qdf_print("0x%x\t",
((tmp >> 16) & 0x0000ffff));
cdf_print("0x%x\t", (tmp & 0x0000ffff));
qdf_print("0x%x\t", (tmp & 0x0000ffff));
if (i % 2 == 0)
cdf_print("\n");
qdf_print("\n");
}
}
}
@@ -219,9 +219,9 @@ void priv_dump_chaninfo(struct hif_softc *scn)
hif_read32_mb(scn->mem +
BB_chn1_tables_intf_data) &
0x0000ffff;
cdf_print("0x%x\t", val);
qdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
qdf_print("\n");
}
} else {
len = (bw == 2) ? 59 : 60;
@@ -229,10 +229,10 @@ void priv_dump_chaninfo(struct hif_softc *scn)
tmp =
hif_read32_mb(scn->mem +
BB_chn1_tables_intf_data);
cdf_print("0x%x\n", (tmp >> 16) & 0x0000ffff);
cdf_print("0x%x\n", tmp & 0x0000ffff);
qdf_print("0x%x\n", (tmp >> 16) & 0x0000ffff);
qdf_print("0x%x\n", tmp & 0x0000ffff);
if (i % 2 == 0)
cdf_print("\n");
qdf_print("\n");
}
if (bw > 2) {
/* bw == 3 for vht80 */
@@ -244,11 +244,11 @@ void priv_dump_chaninfo(struct hif_softc *scn)
tmp =
hif_read32_mb(scn->mem +
BB_chn1_tables_intf_data);
cdf_print("0x%x\t",
qdf_print("0x%x\t",
((tmp >> 16) & 0x0000ffff));
cdf_print("0x%x\t", (tmp & 0x0000ffff));
qdf_print("0x%x\t", (tmp & 0x0000ffff));
if (i % 2 == 0)
cdf_print("\n");
qdf_print("\n");
}
}
}
@@ -283,9 +283,9 @@ void priv_dump_agc(struct hif_softc *scn)
BB_chaninfo_tab_b0 + i * 4);
val = hif_read32_mb(scn->mem +
PHY_BB_CHN_TABLES_INTF_DATA);
cdf_print("0x%x\t", val);
qdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
qdf_print("\n");
}
}
if (chain1) {
@@ -295,9 +295,9 @@ void priv_dump_agc(struct hif_softc *scn)
BB_chaninfo_tab_b0 + i * 4);
val = hif_read32_mb(scn->mem +
PHY_BB_CHN1_TABLES_INTF_DATA);
cdf_print("0x%x\t", val);
qdf_print("0x%x\t", val);
if (i % 4 == 0)
cdf_print("\n");
qdf_print("\n");
}
}
HIF_TRACE("%s: AGC history buffer dump X", __func__);
@@ -316,12 +316,12 @@ void priv_dump_bbwatchdog(struct hif_softc *scn)
HIF_TRACE("%s: BB watchdog dump E", __func__);
val = hif_read32_mb(scn->mem + BB_watchdog_status);
cdf_print("0x%x\t", val);
qdf_print("0x%x\t", val);
val = hif_read32_mb(scn->mem + BB_watchdog_ctrl_1);
cdf_print("0x%x\t", val);
qdf_print("0x%x\t", val);
val = hif_read32_mb(scn->mem + BB_watchdog_ctrl_2);
cdf_print("0x%x\t", val);
qdf_print("0x%x\t", val);
val = hif_read32_mb(scn->mem + BB_watchdog_status_B);
cdf_print("0x%x", val);
qdf_print("0x%x", val);
HIF_TRACE("%s: BB watchdog dump X", __func__);
}
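The set_target_reg_bits/get_target_reg_bits helpers converted at the top of this file are read-modify-write accessors built on hif_read32_mb/hif_write32_mb. A standalone sketch of the same idea, assuming only those two accessors:

static void rmw_target_bits(void __iomem *mem, uint32_t reg,
			    uint32_t bitmask, uint32_t val)
{
	uint32_t value = hif_read32_mb(mem + reg);	/* read */

	value &= ~bitmask;				/* clear the field */
	value |= (val & bitmask);			/* merge the new bits */
	hif_write32_mb(mem + reg, value);		/* write back */
}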


@@ -33,7 +33,7 @@
#include "hif_main.h"
#include "regtable.h"
#include "ce_reg.h"
#include "cdf_atomic.h"
#include "qdf_atomic.h"
#include "if_pci.h"
/*
* For maximum performance and no power management, set this to 1.
@@ -258,13 +258,13 @@ static inline void ce_irq_enable(struct hif_softc *scn, int ce_id)
uint32_t tmp = 1 << ce_id;
struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
cdf_spin_lock_irqsave(&sc->irq_lock);
qdf_spin_lock_irqsave(&sc->irq_lock);
scn->ce_irq_summary &= ~tmp;
if (scn->ce_irq_summary == 0) {
/* Enable Legacy PCI line interrupts */
if (LEGACY_INTERRUPTS(sc) &&
(scn->target_status != OL_TRGET_STATUS_RESET) &&
(!cdf_atomic_read(&scn->link_suspended))) {
(!qdf_atomic_read(&scn->link_suspended))) {
hif_write32_mb(scn->mem +
(SOC_CORE_BASE_ADDRESS |
@@ -278,7 +278,7 @@ static inline void ce_irq_enable(struct hif_softc *scn, int ce_id)
}
if (scn->hif_init_done == true)
A_TARGET_ACCESS_END(scn);
cdf_spin_unlock_irqrestore(&sc->irq_lock);
qdf_spin_unlock_irqrestore(&sc->irq_lock);
/* check for missed firmware crash */
hif_fw_interrupt_handler(0, scn);


@@ -46,9 +46,9 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <osapi_linux.h>
#include "cdf_status.h"
#include "qdf_status.h"
#include "wma_api.h"
#include "cdf_atomic.h"
#include "qdf_atomic.h"
#include "wlan_hdd_power.h"
#include "wlan_hdd_main.h"
#ifdef CONFIG_CNSS
@@ -239,7 +239,7 @@ static irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
__func__,
hif_read32_mb(sc->mem + 0x80018),
hif_read32_mb(sc->mem + 0x8001c));
CDF_BUG(0);
QDF_BUG(0);
}
PCI_CLR_CAUSE0_REGISTER(sc);
@@ -260,9 +260,9 @@ static irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
if (ssr_irq) {
sc->irq_event = irq;
cdf_atomic_set(&scn->tasklet_from_intr, 1);
qdf_atomic_set(&scn->tasklet_from_intr, 1);
cdf_atomic_inc(&scn->active_tasklet_cnt);
qdf_atomic_inc(&scn->active_tasklet_cnt);
tasklet_schedule(&sc->intr_tq);
} else {
icnss_dispatch_ce_irq(scn);
@@ -302,13 +302,13 @@ void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
A_target_id_t pci_addr = scn->mem;
cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
/*
* If the deferred sleep timer is running cancel it
* and put the soc into sleep.
*/
if (hif_state->fake_sleep == true) {
cdf_softirq_timer_cancel(&hif_state->sleep_timer);
qdf_timer_stop(&hif_state->sleep_timer);
if (hif_state->verified_awake == false) {
hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS,
@@ -316,7 +316,7 @@ void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
}
hif_state->fake_sleep = false;
}
cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
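The timer conversion here is mechanical: cdf_softirq_timer_cancel becomes qdf_timer_stop and, in hif_target_sleep_state_adjust later in this file, cdf_softirq_timer_start becomes qdf_timer_start with the period still in milliseconds. The converted stop/start pairing for the fake-sleep timer, as a sketch:

qdf_timer_stop(&hif_state->sleep_timer);	/* cancel if armed */
qdf_timer_start(&hif_state->sleep_timer,
		HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);	/* re-arm */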
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
@@ -367,7 +367,7 @@ static void hif_pci_device_reset(struct hif_pci_softc *sc)
if (hif_targ_is_awake(scn, mem))
break;
cdf_mdelay(1);
qdf_mdelay(1);
}
/* Put Target, including PCIe, into RESET. */
@@ -379,7 +379,7 @@ static void hif_pci_device_reset(struct hif_pci_softc *sc)
RTC_STATE_COLD_RESET_MASK)
break;
cdf_mdelay(1);
qdf_mdelay(1);
}
/* Pull Target, including PCIe, out of RESET. */
@@ -391,7 +391,7 @@ static void hif_pci_device_reset(struct hif_pci_softc *sc)
RTC_STATE_COLD_RESET_MASK))
break;
cdf_mdelay(1);
qdf_mdelay(1);
}
A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
@@ -432,7 +432,7 @@ void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (hif_targ_is_awake(scn, mem))
break;
cdf_mdelay(1);
qdf_mdelay(1);
}
/*
@@ -457,7 +457,7 @@ void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
HOST_GROUP0_MASK);
cdf_mdelay(100);
qdf_mdelay(100);
/* Clear FW_INDICATOR_ADDRESS */
if (HAS_FW_INDICATOR) {
@@ -490,7 +490,7 @@ void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
hif_read32_mb(mem +
(RTC_SOC_BASE_ADDRESS |
SOC_RESET_CONTROL_ADDRESS));
cdf_mdelay(10);
qdf_mdelay(10);
/* CE unreset */
val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
@@ -500,7 +500,7 @@ void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
hif_read32_mb(mem +
(RTC_SOC_BASE_ADDRESS |
SOC_RESET_CONTROL_ADDRESS));
cdf_mdelay(10);
qdf_mdelay(10);
/* Read Target CPU Intr Cause */
val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
@@ -522,7 +522,7 @@ void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
__func__, val);
cdf_mdelay(100);
qdf_mdelay(100);
HIF_INFO_MED("%s: Target Warm reset complete", __func__);
}
@@ -593,7 +593,7 @@ int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
cdf_mdelay(100);
qdf_mdelay(100);
timeout_count += 100;
}
@@ -816,8 +816,8 @@ static void reschedule_tasklet_work_handler(void *arg)
*/
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
cdf_create_work(&sc->reschedule_tasklet_work,
reschedule_tasklet_work_handler, sc);
qdf_create_work(0, &sc->reschedule_tasklet_work,
reschedule_tasklet_work_handler, NULL);
}
#else
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
@@ -831,7 +831,7 @@ static void wlan_tasklet(unsigned long data)
if (scn->hif_init_done == false)
goto end;
if (cdf_atomic_read(&scn->link_suspended))
if (qdf_atomic_read(&scn->link_suspended))
goto end;
if (!ADRASTEA_BU) {
@@ -841,8 +841,8 @@ static void wlan_tasklet(unsigned long data)
}
end:
cdf_atomic_set(&scn->tasklet_from_intr, 0);
cdf_atomic_dec(&scn->active_tasklet_cnt);
qdf_atomic_set(&scn->tasklet_from_intr, 0);
qdf_atomic_dec(&scn->active_tasklet_cnt);
}
#ifdef FEATURE_RUNTIME_PM
@@ -1049,7 +1049,7 @@ static void hif_pm_runtime_start(struct hif_pci_softc *sc)
return;
}
if (mode == CDF_FTM_MODE || WLAN_IS_EPPING_ENABLED(mode)) {
if (mode == QDF_FTM_MODE || WLAN_IS_EPPING_ENABLED(mode)) {
HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
__func__);
return;
@@ -1062,7 +1062,7 @@ static void hif_pm_runtime_start(struct hif_pci_softc *sc)
ol_sc->runtime_pm_delay);
cnss_runtime_init(sc->dev, ol_sc->runtime_pm_delay);
cdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
hif_runtime_pm_debugfs_create(sc);
}
@@ -1081,13 +1081,13 @@ static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
if (!ol_sc->enable_runtime_pm)
return;
if (mode == CDF_FTM_MODE || WLAN_IS_EPPING_ENABLED(mode))
if (mode == QDF_FTM_MODE || WLAN_IS_EPPING_ENABLED(mode))
return;
cnss_runtime_exit(sc->dev);
cnss_pm_runtime_request(sc->dev, CNSS_PM_RUNTIME_RESUME);
cdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
hif_runtime_pm_debugfs_remove(sc);
del_timer_sync(&sc->runtime_timer);
@@ -1104,10 +1104,10 @@ static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
spin_lock_init(&sc->runtime_lock);
cdf_atomic_init(&sc->pm_state);
qdf_atomic_init(&sc->pm_state);
sc->prevent_linkdown_lock =
hif_runtime_lock_init("linkdown suspend disabled");
cdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
INIT_LIST_HEAD(&sc->prevent_suspend_list);
}
@@ -1119,7 +1119,7 @@ static void hif_pm_runtime_open(struct hif_pci_softc *sc)
*/
static void hif_pm_runtime_close(struct hif_pci_softc *sc)
{
if (cdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
return;
else
hif_pm_runtime_stop(sc);
@@ -1193,16 +1193,16 @@ int hif_bus_get_context_size(void)
*
* Return: QDF_STATUS
*/
CDF_STATUS hif_bus_open(struct hif_softc *ol_sc, enum ath_hal_bus_type bus_type)
QDF_STATUS hif_bus_open(struct hif_softc *ol_sc, enum qdf_bus_type bus_type)
{
struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
ol_sc->bus_type = bus_type;
hif_pm_runtime_open(sc);
cdf_spinlock_init(&sc->irq_lock);
qdf_spinlock_create(&sc->irq_lock);
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
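Note the lock rename is not name-for-name: cdf_spinlock_init becomes qdf_spinlock_create, while the IRQ-safe pair keeps its shape; the flags word lives inside qdf_spinlock_t, which is why no flags argument is passed. A sketch of the lifecycle under that assumption:

qdf_spinlock_t irq_lock;

qdf_spinlock_create(&irq_lock);		/* once, at bus open */

qdf_spin_lock_irqsave(&irq_lock);	/* IRQ-safe critical section */
/* ... update shared interrupt state ... */
qdf_spin_unlock_irqrestore(&irq_lock);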
/**
@@ -1228,8 +1228,8 @@ int hif_enable_pci(struct hif_pci_softc *sc,
uint16_t device_id;
struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
pci_read_config_word(pdev,PCI_DEVICE_ID,&device_id);
if(device_id != id->device) {
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
if (device_id != id->device) {
HIF_ERROR(
"%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
__func__, device_id, id->device);
@@ -1361,7 +1361,7 @@ int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
ret = -EAGAIN;
goto end;
}
cdf_mdelay(1);
qdf_mdelay(1);
targ_awake_limit--;
}
@@ -1374,7 +1374,7 @@ int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
PCIE_LOCAL_BASE_ADDRESS +
PCIE_SOC_RDY_STATUS_ADDRESS) \
& PCIE_SOC_RDY_STATUS_BAR_MASK)) {
cdf_mdelay(10);
qdf_mdelay(10);
}
if (wait_limit < 0) {
/* AR6320v1 doesn't support checking of BAR0 configuration,
@@ -1411,10 +1411,10 @@ static void wlan_tasklet_msi(unsigned long data)
if (scn->hif_init_done == false)
goto irq_handled;
if (cdf_atomic_read(&scn->link_suspended))
if (qdf_atomic_read(&scn->link_suspended))
goto irq_handled;
cdf_atomic_inc(&scn->active_tasklet_cnt);
qdf_atomic_inc(&scn->active_tasklet_cnt);
if (entry->id == HIF_MAX_TASKLET_NUM) {
/* the last tasklet is for fw IRQ */
@@ -1430,7 +1430,7 @@ static void wlan_tasklet_msi(unsigned long data)
return;
irq_handled:
cdf_atomic_dec(&scn->active_tasklet_cnt);
qdf_atomic_dec(&scn->active_tasklet_cnt);
}
@@ -1464,17 +1464,17 @@ int hif_configure_msi(struct hif_pci_softc *sc)
int i;
sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
sc->tasklet_entries[HIF_MAX_TASKLET_NUM -1].hif_handler =
sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
(void *)sc;
sc->tasklet_entries[HIF_MAX_TASKLET_NUM -1].id =
sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
HIF_MAX_TASKLET_NUM;
tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
(unsigned long)&sc->tasklet_entries[
HIF_MAX_TASKLET_NUM -1]);
HIF_MAX_TASKLET_NUM-1]);
ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
hif_pci_msi_fw_handler,
IRQF_SHARED, "wlan_pci", sc);
if(ret) {
if (ret) {
HIF_ERROR("%s: request_irq failed", __func__);
goto err_intr;
}
@@ -1487,7 +1487,7 @@ int hif_configure_msi(struct hif_pci_softc *sc)
i + MSI_ASSIGN_CE_INITIAL),
ce_per_engine_handler, IRQF_SHARED,
"wlan_pci", sc);
if(ret) {
if (ret) {
HIF_ERROR("%s: request_irq failed", __func__);
goto err_intr;
}
@@ -1495,7 +1495,8 @@ int hif_configure_msi(struct hif_pci_softc *sc)
} else if (rv > 0) {
HIF_TRACE("%s: use single msi", __func__);
if ((ret = pci_enable_msi(sc->pdev)) < 0) {
ret = pci_enable_msi(sc->pdev);
if (ret < 0) {
HIF_ERROR("%s: single MSI allocation failed",
__func__);
/* Try for legacy PCI line interrupts */
@@ -1507,7 +1508,7 @@ int hif_configure_msi(struct hif_pci_softc *sc)
ret = request_irq(sc->pdev->irq,
hif_pci_interrupt_handler,
IRQF_SHARED, "wlan_pci", sc);
if(ret) {
if (ret) {
HIF_ERROR("%s: request_irq failed", __func__);
goto err_intr;
}
@@ -1517,7 +1518,8 @@ int hif_configure_msi(struct hif_pci_softc *sc)
ret = -EIO;
HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
}
if ((ret = pci_enable_msi(sc->pdev)) < 0) {
ret = pci_enable_msi(sc->pdev);
if (ret < 0) {
HIF_ERROR("%s: single MSI interrupt allocation failed",
__func__);
/* Try for legacy PCI line interrupts */
@@ -1528,7 +1530,7 @@ int hif_configure_msi(struct hif_pci_softc *sc)
ret = request_irq(sc->pdev->irq,
hif_pci_interrupt_handler, IRQF_SHARED,
"wlan_pci", sc);
if(ret) {
if (ret) {
HIF_ERROR("%s: request_irq failed", __func__);
goto err_intr;
}
@@ -1564,7 +1566,7 @@ static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
ret = request_irq(sc->pdev->irq,
hif_pci_interrupt_handler, IRQF_SHARED,
"wlan_pci", sc);
if(ret) {
if (ret) {
HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
goto end;
}
@@ -1576,7 +1578,7 @@ static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_RESET);
end:
CDF_TRACE(CDF_MODULE_ID_HIF, CDF_TRACE_LEVEL_ERROR,
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
"%s: X, ret = %d", __func__, ret);
return ret;
}
@@ -1724,7 +1726,7 @@ static inline int hif_drain_tasklets(struct hif_softc *scn)
{
uint32_t ce_drain_wait_cnt = 0;
while (cdf_atomic_read(&scn->active_tasklet_cnt)) {
while (qdf_atomic_read(&scn->active_tasklet_cnt)) {
if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
HIF_ERROR("%s: CE still not done with access",
__func__);
@@ -1827,7 +1829,7 @@ static int hif_bus_suspend_link_down(struct hif_softc *scn)
/* Stop the HIF Sleep Timer */
hif_cancel_deferred_target_sleep(hif_hdl);
cdf_atomic_set(&scn->link_suspended, 1);
qdf_atomic_set(&scn->link_suspended, 1);
return 0;
}
@@ -1854,7 +1856,7 @@ static int hif_bus_resume_link_down(struct hif_softc *scn)
return -EFAULT;
}
cdf_atomic_set(&scn->link_suspended, 0);
qdf_atomic_set(&scn->link_suspended, 0);
enable_irq(pdev->irq);
@@ -1914,7 +1916,7 @@ static void __hif_runtime_pm_set_state(struct hif_softc *scn,
}
sc = scn->hif_sc;
cdf_atomic_set(&sc->pm_state, state);
qdf_atomic_set(&sc->pm_state, state);
}
@@ -2117,12 +2119,12 @@ static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
if (scn->fastpath_mode_on) {
if (Q_TARGET_ACCESS_BEGIN(scn)) {
ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
cdf_spin_lock_bh(&ce_state->ce_index_lock);
qdf_spin_lock_bh(&ce_state->ce_index_lock);
/* war_ce_src_ring_write_idx_set */
CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
ce_state->src_ring->write_index);
cdf_spin_unlock_bh(&ce_state->ce_index_lock);
qdf_spin_unlock_bh(&ce_state->ce_index_lock);
Q_TARGET_ACCESS_END(scn);
}
}
@@ -2151,7 +2153,7 @@ static void hif_free_msi_ctx(struct hif_softc *scn)
{
struct hif_pci_softc *sc = scn->hif_sc;
struct hif_msi_info *info = &sc->msi_info;
struct device *dev = scn->cdf_dev->dev;
struct device *dev = scn->qdf_dev->dev;
OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
@@ -2174,7 +2176,7 @@ void hif_disable_isr(struct hif_opaque_softc *ol_sc)
/* Cancel the pending tasklet */
ce_tasklet_kill(scn);
tasklet_kill(&sc->intr_tq);
cdf_atomic_set(&scn->active_tasklet_cnt, 0);
qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}
/* Function to reset SoC */
@@ -2299,7 +2301,7 @@ static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
hif_msm_pcie_debug_info(sc);
if (!cfg->enable_self_recovery)
CDF_BUG(0);
QDF_BUG(0);
scn->recovery = true;
@@ -2360,10 +2362,10 @@ hif_target_sleep_state_adjust(struct hif_softc *scn,
if (scn->recovery)
return -EACCES;
if (cdf_atomic_read(&scn->link_suspended)) {
if (qdf_atomic_read(&scn->link_suspended)) {
HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
debug = true;
CDF_ASSERT(0);
QDF_ASSERT(0);
return -EACCES;
}
@@ -2371,29 +2373,29 @@ hif_target_sleep_state_adjust(struct hif_softc *scn,
wait_for_it = true;
HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
__func__);
CDF_ASSERT(0);
QDF_ASSERT(0);
}
if (sleep_ok) {
cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
hif_state->keep_awake_count--;
if (hif_state->keep_awake_count == 0) {
/* Allow sleep */
hif_state->verified_awake = false;
hif_state->sleep_ticks = cdf_system_ticks();
hif_state->sleep_ticks = qdf_system_ticks();
}
if (hif_state->fake_sleep == false) {
/* Set the Fake Sleep */
hif_state->fake_sleep = true;
/* Start the Sleep Timer */
cdf_softirq_timer_cancel(&hif_state->sleep_timer);
cdf_softirq_timer_start(&hif_state->sleep_timer,
qdf_timer_stop(&hif_state->sleep_timer);
qdf_timer_start(&hif_state->sleep_timer,
HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
}
cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
} else {
cdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
if (hif_state->fake_sleep) {
hif_state->verified_awake = true;
@@ -2407,7 +2409,7 @@ hif_target_sleep_state_adjust(struct hif_softc *scn,
}
}
hif_state->keep_awake_count++;
cdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8Ms */
@@ -2673,7 +2675,7 @@ void hif_target_sync(struct hif_softc *scn)
PCIE_INTR_ENABLE_ADDRESS),
PCIE_INTR_FIRMWARE_MASK);
cdf_mdelay(10);
qdf_mdelay(10);
}
if (wait_limit < 0)
HIF_TRACE("%s: FW signal timed out",
@@ -2696,9 +2698,9 @@ void hif_target_sync(struct hif_softc *scn)
* @bdev: bus dev pointer
* bid: bus id pointer
* type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
* Return: CDF_STATUS
* Return: QDF_STATUS
*/
CDF_STATUS hif_enable_bus(struct hif_softc *ol_sc,
QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc,
struct device *dev, void *bdev,
const hif_bus_id *bid,
enum hif_enable_type type)
@@ -2716,7 +2718,7 @@ CDF_STATUS hif_enable_bus(struct hif_softc *ol_sc,
if (!ol_sc) {
HIF_ERROR("%s: hif_ctx is NULL", __func__);
return CDF_STATUS_E_NOMEM;
return QDF_STATUS_E_NOMEM;
}
HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
@@ -2787,7 +2789,7 @@ err_tgtstate:
hif_disable_pci(sc);
sc->pci_enabled = false;
HIF_ERROR("%s: error, hif_disable_pci done", __func__);
return CDF_STATUS_E_ABORTED;
return QDF_STATUS_E_ABORTED;
err_enable_pci:
if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
@@ -2796,7 +2798,7 @@ err_enable_pci:
HIF_INFO("%s: pci reprobe", __func__);
/* 10, 40, 90, 100, 100, ... */
delay_time = max(100, 10 * (probe_again * probe_again));
cdf_mdelay(delay_time);
qdf_mdelay(delay_time);
goto again;
}
return ret;
@@ -2869,7 +2871,7 @@ int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
return -EFAULT;
}
pm_state = cdf_atomic_read(&sc->pm_state);
pm_state = qdf_atomic_read(&sc->pm_state);
if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
pm_state == HIF_PM_RUNTIME_STATE_NONE) {
@@ -2888,7 +2890,7 @@ int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
if (ret && ret != -EINPROGRESS) {
sc->pm_stats.runtime_get_err++;
HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
__func__, cdf_atomic_read(&sc->pm_state), ret);
__func__, qdf_atomic_read(&sc->pm_state), ret);
}
return ret;
@@ -2910,7 +2912,7 @@ int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
* This api will return a failure if runtime pm is stopped
* This api will return failure if it would decrement the usage count below 0.
*
* return: CDF_STATUS_SUCCESS if the put is performed
* return: QDF_STATUS_SUCCESS if the put is performed
*/
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{
@@ -2928,7 +2930,7 @@ int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
usage_count = atomic_read(&sc->dev->power.usage_count);
if (usage_count == 1) {
pm_state = cdf_atomic_read(&sc->pm_state);
pm_state = qdf_atomic_read(&sc->pm_state);
if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
error = "Ignoring unexpected put when runtime pm is disabled";
@@ -3002,7 +3004,7 @@ static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
hif_sc->pm_stats.prevent_suspend++;
HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
cdf_atomic_read(&hif_sc->pm_state), ret);
qdf_atomic_read(&hif_sc->pm_state), ret);
return ret;
}
@@ -3029,7 +3031,7 @@ static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
* context->active should be active for allow suspend to happen
* Handling this case here to prevent any failures.
*/
if ((cdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
&& usage_count == 1) || usage_count == 0) {
hif_pci_runtime_pm_warn(hif_sc,
"Allow without a prevent suspend");
@@ -3047,7 +3049,7 @@ static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
ret = hif_pm_runtime_put_auto(hif_sc->dev);
HIF_ERROR("%s: in pm_state:%d ret: %d", __func__,
cdf_atomic_read(&hif_sc->pm_state), ret);
qdf_atomic_read(&hif_sc->pm_state), ret);
hif_sc->pm_stats.allow_suspend++;
return ret;
@@ -3232,7 +3234,7 @@ int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
HIF_ERROR("%s: pm_state: %d delay: %dms ret: %d\n", __func__,
cdf_atomic_read(&hif_sc->pm_state), delay, ret);
qdf_atomic_read(&hif_sc->pm_state), delay, ret);
return ret;
}
@@ -3250,7 +3252,7 @@ struct hif_pm_runtime_lock *hif_runtime_lock_init(const char *name)
{
struct hif_pm_runtime_lock *context;
context = cdf_mem_malloc(sizeof(*context));
context = qdf_mem_malloc(sizeof(*context));
if (!context) {
HIF_ERROR("%s: No memory for Runtime PM wakelock context\n",
__func__);
@@ -3294,7 +3296,7 @@ void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
__hif_pm_runtime_allow_suspend(sc, context);
spin_unlock_irqrestore(&sc->runtime_lock, flags);
cdf_mem_free(context);
qdf_mem_free(context);
}
#endif /* FEATURE_RUNTIME_PM */
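The runtime-PM hunks above keep the wakelock-context model intact: hif_runtime_lock_init allocates a named context with qdf_mem_malloc, the prevent/allow paths vote against and for suspend, and hif_runtime_lock_deinit drops any outstanding prevent before freeing. A hypothetical client, assuming public prevent/allow wrappers over the __hif_pm_runtime_* helpers shown above:

struct hif_pm_runtime_lock *lock;

lock = hif_runtime_lock_init("tx-pending");	/* hypothetical lock name */
if (!lock)
	return -ENOMEM;

hif_pm_runtime_prevent_suspend(hif_ctx, lock);	/* hold the bus awake */
/* ... issue bus traffic ... */
hif_pm_runtime_allow_suspend(hif_ctx, lock);	/* drop the vote */

hif_runtime_lock_deinit(hif_ctx, lock);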


@@ -126,11 +126,11 @@ struct hif_pci_softc {
int irq_event;
int cacheline_sz;
u16 devid;
cdf_dma_addr_t soc_pcie_bar0;
qdf_dma_addr_t soc_pcie_bar0;
struct hif_tasklet_entry tasklet_entries[HIF_MAX_TASKLET_NUM];
bool pci_enabled;
cdf_spinlock_t irq_lock;
cdf_work_t reschedule_tasklet_work;
qdf_spinlock_t irq_lock;
qdf_work_t reschedule_tasklet_work;
uint32_t lcr_val;
#ifdef FEATURE_RUNTIME_PM
atomic_t pm_state;


@@ -41,7 +41,7 @@
#include "hif.h"
#include "regtable.h"
#include "ce_reg.h"
#include "cdf_atomic.h"
#include "qdf_atomic.h"
#include <soc/qcom/icnss.h>
#include "hif_main.h"
#include "hif_debug.h"


@@ -97,7 +97,7 @@ void hif_disable_isr(struct hif_opaque_softc *hif_ctx)
hif_nointrs(scn);
ce_tasklet_kill(scn);
cdf_atomic_set(&scn->active_tasklet_cnt, 0);
qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}
/**
@@ -204,9 +204,9 @@ int hif_bus_get_context_size(void)
*
* Return: QDF_STATUS
*/
CDF_STATUS hif_bus_open(struct hif_softc *scn, enum ath_hal_bus_type bus_type)
QDF_STATUS hif_bus_open(struct hif_softc *scn, enum qdf_bus_type bus_type)
{
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
/**
@@ -245,9 +245,9 @@ int hif_get_target_type(struct hif_softc *ol_sc, struct device *dev,
* @bid: bus id
* @type: bus type
*
* Return: CDF_STATUS
* Return: QDF_STATUS
*/
CDF_STATUS hif_enable_bus(struct hif_softc *ol_sc,
QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc,
struct device *dev, void *bdev,
const hif_bus_id *bid,
enum hif_enable_type type)
@@ -264,14 +264,14 @@ CDF_STATUS hif_enable_bus(struct hif_softc *ol_sc,
if (!ol_sc) {
HIF_ERROR("%s: hif_ctx is NULL", __func__);
return CDF_STATUS_E_NOMEM;
return QDF_STATUS_E_NOMEM;
}
ret = hif_get_target_type(ol_sc, dev, bdev, bid,
&hif_type, &target_type);
if (ret < 0) {
HIF_ERROR("%s: invalid device id/revision_id", __func__);
return CDF_STATUS_E_FAILURE;
return QDF_STATUS_E_FAILURE;
}
hif_register_tbl_attach(ol_sc, hif_type);
@@ -280,7 +280,7 @@ CDF_STATUS hif_enable_bus(struct hif_softc *ol_sc,
HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x",
__func__, hif_type, target_type);
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
/**


@@ -29,7 +29,7 @@
#include "htc_debug.h"
#include "htc_internal.h"
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <cdf_types.h> /* cdf_print */
#include <qdf_types.h> /* qdf_print */
#include <hif.h>
#include "epping_main.h"
#include "hif_io32.h"
@@ -69,16 +69,16 @@ static void destroy_htc_tx_ctrl_packet(HTC_PACKET *pPacket)
cdf_nbuf_free(netbuf);
}
cdf_mem_free(pPacket);
qdf_mem_free(pPacket);
}
static HTC_PACKET *build_htc_tx_ctrl_packet(cdf_device_t osdev)
static HTC_PACKET *build_htc_tx_ctrl_packet(qdf_device_t osdev)
{
HTC_PACKET *pPacket = NULL;
cdf_nbuf_t netbuf;
do {
pPacket = (HTC_PACKET *) cdf_mem_malloc(sizeof(HTC_PACKET));
pPacket = (HTC_PACKET *) qdf_mem_malloc(sizeof(HTC_PACKET));
if (NULL == pPacket) {
break;
}
@@ -86,9 +86,9 @@ static HTC_PACKET *build_htc_tx_ctrl_packet(cdf_device_t osdev)
netbuf =
cdf_nbuf_alloc(osdev, HTC_CONTROL_BUFFER_SIZE, 20, 4, true);
if (NULL == netbuf) {
cdf_mem_free(pPacket);
qdf_mem_free(pPacket);
pPacket = NULL;
cdf_print("%s: nbuf alloc failed\n", __func__);
qdf_print("%s: nbuf alloc failed\n", __func__);
break;
}
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
@@ -158,13 +158,13 @@ static void htc_cleanup(HTC_TARGET *target)
if (NULL == pPacket) {
break;
}
cdf_mem_free(pPacket);
qdf_mem_free(pPacket);
}
pPacket = target->pBundleFreeList;
while (pPacket) {
HTC_PACKET *pPacketTmp = (HTC_PACKET *) pPacket->ListLink.pNext;
cdf_mem_free(pPacket);
qdf_mem_free(pPacket);
pPacket = pPacketTmp;
}
#ifdef TODO_FIXME
@@ -178,21 +178,21 @@ static void htc_cleanup(HTC_TARGET *target)
cdf_nbuf_free(netbuf);
}
cdf_mem_free(pPacket);
qdf_mem_free(pPacket);
}
#endif
cdf_spinlock_destroy(&target->HTCLock);
cdf_spinlock_destroy(&target->HTCRxLock);
cdf_spinlock_destroy(&target->HTCTxLock);
cdf_spinlock_destroy(&target->HTCCreditLock);
qdf_spinlock_destroy(&target->HTCLock);
qdf_spinlock_destroy(&target->HTCRxLock);
qdf_spinlock_destroy(&target->HTCTxLock);
qdf_spinlock_destroy(&target->HTCCreditLock);
/* free our instance */
cdf_mem_free(target);
qdf_mem_free(target);
}
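htc_cleanup mirrors htc_create below it: each qdf_mem_malloc has a matching qdf_mem_free and each qdf_spinlock_create a qdf_spinlock_destroy. A sketch of the allocation pattern, assuming qdf_mem_malloc keeps the cdf behaviour of returning unzeroed memory (which is why the code still zeroes explicitly after allocating):

HTC_PACKET *pPacket = qdf_mem_malloc(sizeof(HTC_PACKET));

if (pPacket == NULL)
	return NULL;			/* allocation failed */
qdf_mem_zero(pPacket, sizeof(HTC_PACKET));
/* ... use the packet ... */
qdf_mem_free(pPacket);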
/* registered target arrival callback from the HIF layer */
HTC_HANDLE htc_create(void *ol_sc, HTC_INIT_INFO *pInfo, cdf_device_t osdev)
HTC_HANDLE htc_create(void *ol_sc, HTC_INIT_INFO *pInfo, qdf_device_t osdev)
{
struct hif_msg_callbacks htcCallbacks;
HTC_ENDPOINT *pEndpoint = NULL;
@@ -207,7 +207,7 @@ HTC_HANDLE htc_create(void *ol_sc, HTC_INIT_INFO *pInfo, cdf_device_t osdev)
A_REGISTER_MODULE_DEBUG_INFO(htc);
target = (HTC_TARGET *) cdf_mem_malloc(sizeof(HTC_TARGET));
target = (HTC_TARGET *) qdf_mem_malloc(sizeof(HTC_TARGET));
if (target == NULL) {
HTC_ERROR("%s: Unable to allocate memory", __func__);
return NULL;
@@ -216,10 +216,10 @@ HTC_HANDLE htc_create(void *ol_sc, HTC_INIT_INFO *pInfo, cdf_device_t osdev)
A_MEMZERO(target, sizeof(HTC_TARGET));
htc_runtime_pm_init(target);
cdf_spinlock_init(&target->HTCLock);
cdf_spinlock_init(&target->HTCRxLock);
cdf_spinlock_init(&target->HTCTxLock);
cdf_spinlock_init(&target->HTCCreditLock);
qdf_spinlock_create(&target->HTCLock);
qdf_spinlock_create(&target->HTCRxLock);
qdf_spinlock_create(&target->HTCTxLock);
qdf_spinlock_create(&target->HTCCreditLock);
do {
A_MEMCPY(&target->HTCInitInfo, pInfo, sizeof(HTC_INIT_INFO));
@@ -232,7 +232,7 @@ HTC_HANDLE htc_create(void *ol_sc, HTC_INIT_INFO *pInfo, cdf_device_t osdev)
for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
HTC_PACKET *pPacket =
(HTC_PACKET *) cdf_mem_malloc(sizeof(HTC_PACKET));
(HTC_PACKET *) qdf_mem_malloc(sizeof(HTC_PACKET));
if (pPacket != NULL) {
A_MEMZERO(pPacket, sizeof(HTC_PACKET));
free_htc_packet_container(target, pPacket);
@@ -250,7 +250,7 @@ HTC_HANDLE htc_create(void *ol_sc, HTC_INIT_INFO *pInfo, cdf_device_t osdev)
#endif
/* setup HIF layer callbacks */
cdf_mem_zero(&htcCallbacks, sizeof(struct hif_msg_callbacks));
qdf_mem_zero(&htcCallbacks, sizeof(struct hif_msg_callbacks));
htcCallbacks.Context = target;
htcCallbacks.rxCompletionHandler = htc_rx_completion_handler;
htcCallbacks.txCompletionHandler = htc_tx_completion_handler;
@@ -348,7 +348,7 @@ A_STATUS htc_setup_target_buffer_assignments(HTC_TARGET *target)
* There are no WMI message exchanges between host and target
* in the endpoint ping case.
* On the host side, the endpoint ping driver is an Ethernet driver
* and it directly sits on HTC. Only HIF, HTC, CDF, ADF are
* and it directly sits on HTC. Only HIF, HTC, QDF, ADF are
* used by the endpoint ping driver. There is no wifi stack
* at all on the host side either. For the tx perf use case,
* the user space mboxping app sends raw packets to the endpoint
@@ -529,7 +529,7 @@ static void reset_endpoint_states(HTC_TARGET *target)
pEndpoint->target = target;
/* pEndpoint->TxCreditFlowEnabled = (A_BOOL)htc_credit_flow; */
pEndpoint->TxCreditFlowEnabled = (A_BOOL) 1;
cdf_atomic_init(&pEndpoint->TxProcessCount);
qdf_atomic_init(&pEndpoint->TxProcessCount);
}
}
@@ -551,7 +551,7 @@ A_STATUS htc_start(HTC_HANDLE HTCHandle)
pSendPacket = htc_alloc_control_tx_packet(target);
if (NULL == pSendPacket) {
AR_DEBUG_ASSERT(false);
cdf_print("%s: allocControlTxPacket failed\n",
qdf_print("%s: allocControlTxPacket failed\n",
__func__);
status = A_NO_MEMORY;
break;
@@ -658,8 +658,8 @@ void htc_stop(HTC_HANDLE HTCHandle)
htc_flush_rx_hold_queue(target, pEndpoint);
htc_flush_endpoint_tx(target, pEndpoint, HTC_TX_PACKET_TAG_ALL);
if (pEndpoint->ul_is_polled) {
cdf_softirq_timer_cancel(&pEndpoint->ul_poll_timer);
cdf_softirq_timer_free(&pEndpoint->ul_poll_timer);
qdf_timer_stop(&pEndpoint->ul_poll_timer);
qdf_timer_free(&pEndpoint->ul_poll_timer);
}
}
@@ -693,7 +693,7 @@ void htc_stop(HTC_HANDLE HTCHandle)
*/
void htc_runtime_pm_init(HTC_TARGET *target)
{
cdf_create_work(&target->queue_kicker, htc_kick_queues, target);
qdf_create_work(0, &target->queue_kicker, htc_kick_queues, target);
}
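The deferred-work API changes signature in this conversion: qdf_create_work takes a leading qdf handle argument (passed as 0 here) ahead of the work item, handler, and context, and the work is later kicked with qdf_sched_work. The pairing, sketched with a hypothetical handler:

static void kick_queues(void *ctx)	/* hypothetical work handler */
{
	/* ... resume queued HTC traffic ... */
}

qdf_create_work(0, &target->queue_kicker, kick_queues, target);
/* later, e.g. from htc_runtime_resume(): */
qdf_sched_work(0, &target->queue_kicker);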
/**
@@ -706,11 +706,11 @@ void htc_runtime_pm_init(HTC_TARGET *target)
*/
int htc_runtime_suspend(void)
{
ol_txrx_pdev_handle txrx_pdev = cds_get_context(CDF_MODULE_ID_TXRX);
ol_txrx_pdev_handle txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (txrx_pdev == NULL) {
HTC_ERROR("%s: txrx context null", __func__);
return CDF_STATUS_E_FAULT;
return QDF_STATUS_E_FAULT;
}
if (ol_txrx_get_tx_pending(txrx_pdev))
@@ -729,13 +729,13 @@ int htc_runtime_suspend(void)
*/
int htc_runtime_resume(void)
{
HTC_HANDLE htc_ctx = cds_get_context(CDF_MODULE_ID_HTC);
HTC_HANDLE htc_ctx = cds_get_context(QDF_MODULE_ID_HTC);
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_ctx);
if (target == NULL)
return 0;
cdf_schedule_work(&target->queue_kicker);
qdf_sched_work(0, &target->queue_kicker);
return 0;
}
@@ -866,9 +866,9 @@ void htc_cancel_deferred_target_sleep(void *context)
* Return: None
*/
void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle,
cdf_dma_addr_t *ce_sr_base_paddr,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr)
qdf_dma_addr_t *ce_reg_paddr)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle);


@@ -33,7 +33,7 @@
#include "htc_packet.h"
#include <htc.h>
#include <htc_services.h>
#include <cdf_types.h> /* cdf_device_t */
#include <qdf_types.h> /* qdf_device_t */
#ifdef __cplusplus
extern "C" {
@@ -51,11 +51,11 @@ typedef void *HTC_HANDLE;
typedef A_UINT16 HTC_SERVICE_ID;
typedef void (*HTC_TARGET_FAILURE)(void *Instance, CDF_STATUS Status);
typedef void (*HTC_TARGET_FAILURE)(void *Instance, QDF_STATUS Status);
typedef struct _HTC_INIT_INFO {
void *pContext; /* context for target notifications */
void (*TargetFailure)(void *Instance, CDF_STATUS Status);
void (*TargetFailure)(void *Instance, QDF_STATUS Status);
void (*TargetSendSuspendComplete)(void *ctx);
} HTC_INIT_INFO;
@@ -151,18 +151,18 @@ typedef struct _HTC_EP_CALLBACKS {
HTC_EP_RECV_ALLOC EpRecvAlloc; /* OPTIONAL recv allocation callback */
HTC_EP_RECV_ALLOC EpRecvAllocThresh; /* OPTIONAL recv allocation callback based on a threshold */
HTC_EP_SEND_PKT_COMP_MULTIPLE EpTxCompleteMultiple; /* OPTIONAL completion handler for multiple complete
indications (EpTxComplete must be NULL) */
indications (EpTxComplete must be NULL) */
HTC_EP_RECV_PKT_MULTIPLE EpRecvPktMultiple; /* OPTIONAL completion handler for multiple
recv packet indications (EpRecv must be NULL) */
recv packet indications (EpRecv must be NULL) */
HTC_EP_RESUME_TX_QUEUE ep_resume_tx_queue;
int RecvAllocThreshold; /* if EpRecvAllocThresh is non-NULL, HTC will compare the
threshold value to the current recv packet length and invoke
the EpRecvAllocThresh callback to acquire a packet buffer */
threshold value to the current recv packet length and invoke
the EpRecvAllocThresh callback to acquire a packet buffer */
int RecvRefillWaterMark; /* if a EpRecvRefill handler is provided, this value
can be used to set a trigger refill callback
when the recv queue drops below this value
if set to 0, the refill is only called when packets
are empty */
can be used to set a trigger refill callback
when the recv queue drops below this value
if set to 0, the refill is only called when packets
are empty */
} HTC_EP_CALLBACKS;
/* service connection information */
@@ -196,32 +196,32 @@ typedef struct _HTC_ENDPOINT_CREDIT_DIST {
HTC_SERVICE_ID service_id; /* Service ID (set by HTC) */
HTC_ENDPOINT_ID Endpoint; /* endpoint for this distribution struct (set by HTC) */
A_UINT32 DistFlags; /* distribution flags, distribution function can
set default activity using SET_EP_ACTIVE() macro */
set default activity using SET_EP_ACTIVE() macro */
int TxCreditsNorm; /* credits for normal operation, anything above this
indicates the endpoint is over-subscribed, this field
is only relevant to the credit distribution function */
indicates the endpoint is over-subscribed, this field
is only relevant to the credit distribution function */
int TxCreditsMin; /* floor for credit distribution, this field is
only relevant to the credit distribution function */
only relevant to the credit distribution function */
int TxCreditsAssigned; /* number of credits assigned to this EP, this field
is only relevant to the credit dist function */
is only relevant to the credit dist function */
int TxCredits; /* current credits available, this field is used by
HTC to determine whether a message can be sent or
must be queued */
HTC to determine whether a message can be sent or
must be queued */
int TxCreditsToDist; /* pending credits to distribute on this endpoint, this
is set by HTC when credit reports arrive.
The credit distribution functions sets this to zero
when it distributes the credits */
is set by HTC when credit reports arrive.
The credit distribution functions sets this to zero
when it distributes the credits */
int TxCreditsSeek; /* this is the number of credits that the current pending TX
packet needs to transmit. This is set by HTC when
and endpoint needs credits in order to transmit */
packet needs to transmit. This is set by HTC when
an endpoint needs credits in order to transmit */
int TxCreditSize; /* size in bytes of each credit (set by HTC) */
int TxCreditsPerMaxMsg; /* credits required for a maximum sized messages (set by HTC) */
void *pHTCReserved; /* reserved for HTC use */
int TxQueueDepth; /* current depth of TX queue , i.e. messages waiting for credits
This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
that has non-zero credits to recover
*/
This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
that has non-zero credits to recover
*/
} HTC_ENDPOINT_CREDIT_DIST;
#define HTC_EP_ACTIVE ((A_UINT32) (1u << 31))
@@ -235,11 +235,11 @@ typedef struct _HTC_ENDPOINT_CREDIT_DIST {
* there are mandatory and optional codes that must be handled */
typedef enum _HTC_CREDIT_DIST_REASON {
HTC_CREDIT_DIST_SEND_COMPLETE = 0, /* credits available as a result of completed
send operations (MANDATORY) resulting in credit reports */
send operations (MANDATORY) resulting in credit reports */
HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1, /* a change in endpoint activity occurred (OPTIONAL) */
HTC_CREDIT_DIST_SEEK_CREDITS, /* an endpoint needs to "seek" credits (OPTIONAL) */
HTC_DUMP_CREDIT_STATE /* for debugging, dump any state information that is kept by
the distribution function */
the distribution function */
} HTC_CREDIT_DIST_REASON;
typedef void (*HTC_CREDIT_DIST_CALLBACK)(void *Context,
@@ -263,7 +263,7 @@ typedef enum _HTC_ENDPOINT_STAT_ACTION {
typedef struct _HTC_ENDPOINT_STATS {
A_UINT32 TxPosted; /* number of TX packets posted to the endpoint */
A_UINT32 TxCreditLowIndications; /* number of times the host set the credit-low flag in a send message on
this endpoint */
this endpoint */
A_UINT32 TxIssued; /* running count of total TX packets issued */
A_UINT32 TxPacketsBundled; /* running count of TX packets that were issued in bundles */
A_UINT32 TxBundles; /* running count of TX bundles that were issued */
@@ -279,7 +279,7 @@ typedef struct _HTC_ENDPOINT_STATS {
A_UINT32 TxCreditsReturned; /* count of credits returned */
A_UINT32 RxReceived; /* count of RX packets received */
A_UINT32 RxLookAheads; /* count of lookahead records
found in messages received on this endpoint */
found in messages received on this endpoint */
A_UINT32 RxPacketsBundled; /* count of recv packets received in a bundle */
A_UINT32 RxBundleLookAheads; /* count of number of bundled lookaheads */
A_UINT32 RxBundleIndFromHdr; /* count of the number of bundle indications from the HTC header */
@@ -292,7 +292,7 @@ typedef struct _HTC_ENDPOINT_STATS {
@desc: Create an instance of HTC over the underlying HIF device
@function name: htc_create
@input: HifDevice - hif device handle,
pInfo - initialization information
pInfo - initialization information
@output:
@return: HTC_HANDLE on success, NULL on failure
@notes:
@@ -300,7 +300,7 @@ typedef struct _HTC_ENDPOINT_STATS {
@see also: htc_destroy
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
HTC_HANDLE htc_create(void *HifDevice,
HTC_INIT_INFO *pInfo, cdf_device_t osdev);
HTC_INIT_INFO *pInfo, qdf_device_t osdev);
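The doc blocks in this header pin down the bring-up order: create the HTC instance, block in htc_wait_target for the target's ready message, connect every service, then call htc_start to issue SETUP_COMPLETE. A hypothetical sketch of that order, error handling elided:

HTC_HANDLE htc = htc_create(hif_dev, &init_info, qdf_ctx);

htc_wait_target(htc);			/* blocks until target is ready */
htc_connect_service(htc, &req, &resp);	/* repeat per service, before start */
htc_start(htc);				/* SETUP_COMPLETE -> endpoints live */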
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@desc: Get the underlying HIF device handle
@function name: htc_get_hif_device
@@ -316,18 +316,19 @@ void *htc_get_hif_device(HTC_HANDLE HTCHandle);
@desc: Set credit distribution parameters
@function name: htc_set_credit_distribution
@input: HTCHandle - HTC handle
pCreditDistCont - caller supplied context to pass into distribution functions
CreditDistFunc - Distribution function callback
CreditDistInit - Credit Distribution initialization callback
ServicePriorityOrder - Array containing list of service IDs, lowest index is highest
priority
ListLength - number of elements in ServicePriorityOrder
pCreditDistCont - caller supplied context to pass into distribution functions
CreditDistFunc - Distribution function callback
CreditDistInit - Credit Distribution initialization callback
ServicePriorityOrder - Array containing list of service IDs, lowest index
is highest priority
ListLength - number of elements in ServicePriorityOrder
@output:
@return:
@notes: The user can set a custom credit distribution function to handle special requirements
for each endpoint. A default credit distribution routine can be used by setting
CreditInitFunc to NULL. The default credit distribution is only provided for simple
"fair" credit distribution without regard to any prioritization.
@notes: The user can set a custom credit distribution function to handle
special requirements for each endpoint. A default credit distribution
routine can be used by setting CreditInitFunc to NULL. The default
credit distribution is only provided for simple "fair" credit distribution
without regard to any prioritization.
@example:
@see also:
@@ -345,8 +346,8 @@ void htc_set_credit_distribution(HTC_HANDLE HTCHandle,
@output:
@return:
@notes: This API blocks until the target responds with an HTC ready message.
The caller should not connect services until the target has indicated it is
ready.
The caller should not connect services until the target has indicated it is
ready.
@example:
@see also: htc_connect_service
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -357,11 +358,12 @@ A_STATUS htc_wait_target(HTC_HANDLE HTCHandle);
@input: HTCHandle - HTC handle
@output:
@return:
@notes: This API indicates to the target that the service connection phase is complete
and the target can freely start all connected services. This API should only be
called AFTER all service connections have been made. HTCStart will issue a
SETUP_COMPLETE message to the target to indicate that all service connections
have been made and the target can start communicating over the endpoints.
@notes: This API indicates to the target that the service connection phase
	is complete and the target can freely start all connected services. This
	API should only be called AFTER all service connections have been made.
	htc_start will issue a SETUP_COMPLETE message to the target to indicate
	that all service connections have been made and the target can start
	communicating over the endpoints.
@example:
@see also: htc_connect_service
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -370,12 +372,12 @@ A_STATUS htc_start(HTC_HANDLE HTCHandle);
@desc: Add receive packet to HTC
@function name: htc_add_receive_pkt
@input: HTCHandle - HTC handle
pPacket - HTC receive packet to add
pPacket - HTC receive packet to add
@output:
@return: A_OK on success
@notes: user must supply HTC packets for capturing incoming HTC frames. The caller
must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
macro.
@notes: user must supply HTC packets for capturing incoming HTC frames.
	The caller must initialize each HTC packet using the
	SET_HTC_PACKET_INFO_RX_REFILL() macro.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
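A short refill sketch, assuming a caller-owned HTC_PACKET and receive buffer (pkt, rx_buf, RX_BUF_LEN, ep_id and my_ctx are illustrative names):

    /* Sketch: hand a receive buffer to HTC for incoming frames. */
    SET_HTC_PACKET_INFO_RX_REFILL(pkt, my_ctx, rx_buf, RX_BUF_LEN, ep_id);
    if (htc_add_receive_pkt(htc_handle, pkt) != A_OK) {
        /* not queued; the caller still owns pkt and rx_buf */
    }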
@@ -384,11 +386,11 @@ A_STATUS htc_add_receive_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket);
@desc: Connect to an HTC service
@function name: htc_connect_service
@input: HTCHandle - HTC handle
pReq - connection details
pReq - connection details
@output: pResp - connection response
@return:
@notes: Service connections must be performed before htc_start. User provides callback handlers
for various endpoint events.
@notes: Service connections must be performed before htc_start.
	User provides callback handlers for various endpoint events.
@example:
@see also: htc_start
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -399,8 +401,8 @@ A_STATUS htc_connect_service(HTC_HANDLE HTCHandle,
@desc: HTC register log dump
@function name: htc_dump
@input: HTCHandle - HTC handle
CmdId - Log command
start - start/print logs
CmdId - Log command
start - start/print logs
@output:
@return:
@notes: Register logs will be started/printed.
@@ -414,12 +416,12 @@ void htc_dump(HTC_HANDLE HTCHandle, uint8_t CmdId, bool start);
@desc: Send an HTC packet
@function name: htc_send_pkt
@input: HTCHandle - HTC handle
pPacket - packet to send
pPacket - packet to send
@output:
@return: A_OK
@notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro.
This interface is fully asynchronous. On error, HTC SendPkt will
call the registered Endpoint callback to cleanup the packet.
This interface is fully asynchronous. On error, HTC SendPkt will
call the registered Endpoint callback to cleanup the packet.
@example:
@see also: htc_flush_endpoint
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
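A hedged send sketch; tag 0 matches the flush semantics documented under htc_flush_endpoint below (pkt, my_ctx, tx_buf, tx_len and ep_id are illustrative):

    /* Sketch: on error HTC invokes the endpoint's completion callback,
     * so the caller must not free pkt on the failure path here. */
    SET_HTC_PACKET_INFO_TX(pkt, my_ctx, tx_buf, tx_len, ep_id, 0);
    htc_send_pkt(htc_handle, pkt);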
@@ -428,15 +430,15 @@ A_STATUS htc_send_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket);
@desc: Send an HTC packet containing a tx descriptor and data
@function name: htc_send_data_pkt
@input: HTCHandle - HTC handle
pPacket - packet to send
pPacket - packet to send
@output:
@return: A_OK
@notes: Caller must initialize packet using SET_HTC_PACKET_INFO_TX() macro.
Caller must provide headroom in an initial fragment added to the
network buffer to store an HTC_FRAME_HDR.
This interface is fully asynchronous. On error, htc_send_data_pkt will
call the registered Endpoint EpDataTxComplete callback to cleanup
the packet.
Caller must provide headroom in an initial fragment added to the
	network buffer to store an HTC_FRAME_HDR.
This interface is fully asynchronous. On error, htc_send_data_pkt will
call the registered Endpoint EpDataTxComplete callback to cleanup
the packet.
@example:
@see also: htc_send_pkt
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -454,7 +456,7 @@ A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket,
@output:
@return:
@notes: All receive and pending TX packets will
be flushed.
be flushed.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -466,7 +468,7 @@ void htc_flush_surprise_remove(HTC_HANDLE HTCHandle);
@output:
@return:
@notes: HTC communication is halted. All receive and pending TX packets will
be flushed.
be flushed.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
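For completeness, the documented teardown order pairs this halt with htc_destroy, e.g. (sketch):

    htc_stop(htc_handle);       /* flush all RX and pending TX packets */
    htc_destroy(htc_handle);    /* free the instance made by htc_create */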
@@ -486,12 +488,12 @@ void htc_destroy(HTC_HANDLE HTCHandle);
@desc: Flush pending TX packets
@function name: htc_flush_endpoint
@input: HTCHandle - HTC handle
Endpoint - Endpoint to flush
Tag - flush tag
Endpoint - Endpoint to flush
Tag - flush tag
@output:
@return:
@notes: The Tag parameter is used to selectively flush packets with matching tags.
The value of 0 forces all packets to be flushed regardless of tag.
	The value of 0 forces all packets to be flushed regardless of tag.
@example:
@see also: htc_send_pkt
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -512,12 +514,12 @@ void htc_dump_credit_states(HTC_HANDLE HTCHandle);
@desc: Indicate a traffic activity change on an endpoint
@function name: htc_indicate_activity_change
@input: HTCHandle - HTC handle
Endpoint - endpoint in which activity has changed
Active - true if active, false if it has become inactive
Endpoint - endpoint in which activity has changed
Active - true if active, false if it has become inactive
@output:
@return:
@notes: This triggers the registered credit distribution function to
re-adjust credits for active/inactive endpoints.
re-adjust credits for active/inactive endpoints.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -528,24 +530,26 @@ void htc_indicate_activity_change(HTC_HANDLE HTCHandle,
@desc: Get endpoint statistics
@function name: htc_get_endpoint_statistics
@input: HTCHandle - HTC handle
Endpoint - Endpoint identifier
Action - action to take with statistics
Endpoint - Endpoint identifier
Action - action to take with statistics
@output:
pStats - statistics that were sampled (can be NULL if Action is HTC_EP_STAT_CLEAR)
pStats - statistics that were sampled (can be NULL if Action is HTC_EP_STAT_CLEAR)
@return: true if statistics profiling is enabled, otherwise false.
@notes: Statistics is a compile-time option and this function may return false
if HTC is not compiled with profiling.
@notes: Statistics is a compile-time option and this function may return
	false if HTC is not compiled with profiling.
The caller can specify the statistic "action" to take when sampling
the statistics. This includes:
The caller can specify the statistic "action" to take when sampling
	the statistics. This includes:
HTC_EP_STAT_SAMPLE: The pStats structure is filled with the current values.
HTC_EP_STAT_SAMPLE_AND_CLEAR: The structure is filled and the current statistics
are cleared.
HTC_EP_STAT_CLEAR: the statistics are cleared, the caller can pass a NULL value for
	pStats
HTC_EP_STAT_SAMPLE: The pStats structure is filled with the current
	values.
	HTC_EP_STAT_SAMPLE_AND_CLEAR: The structure is filled and the current
	statistics are cleared.
	HTC_EP_STAT_CLEAR: the statistics are cleared, the caller can pass a NULL
	value for pStats
@example:
@see also:
@@ -562,12 +566,13 @@ A_BOOL htc_get_endpoint_statistics(HTC_HANDLE HTCHandle,
@output:
@return:
@notes:
HTC will block the receiver if the EpRecvAlloc callback fails to provide a packet.
The caller can use this API to indicate to HTC when resources (buffers) are available
such that the receiver can be unblocked and HTC may re-attempt fetching the pending message.
HTC will block the receiver if the EpRecvAlloc callback fails to provide a
packet. The caller can use this API to indicate to HTC when resources
(buffers) are available such that the receiver can be unblocked and HTC
may re-attempt fetching the pending message.
This API is not required if the user uses the EpRecvRefill callback or uses the HTCAddReceivePacket()
API to recycle or provide receive packets to HTC.
This API is not required if the user uses the EpRecvRefill callback or uses
	the HTCAddReceivePacket() API to recycle or provide receive packets to HTC.
@example:
@see also:
@@ -578,17 +583,17 @@ void htc_unblock_recv(HTC_HANDLE HTCHandle);
@desc: send a series of HTC packets
@function name: htc_send_pkts_multiple
@input: HTCHandle - HTC handle
pPktQueue - local queue holding packets to send
pPktQueue - local queue holding packets to send
@output:
@return: A_OK
@notes: Caller must initialize each packet using SET_HTC_PACKET_INFO_TX() macro.
The queue must only contain packets directed at the same endpoint.
Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the TX packets in FIFO order.
This API will remove the packets from the pkt queue and place them into the HTC Tx Queue
and bundle messages where possible.
The caller may allocate the pkt queue on the stack to hold the packets.
This interface is fully asynchronous. On error, htc_send_pkts will
call the registered Endpoint callback to cleanup the packet.
The queue must only contain packets directed at the same endpoint.
Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the TX packets in FIFO order.
This API will remove the packets from the pkt queue and place them into the HTC Tx Queue
and bundle messages where possible.
The caller may allocate the pkt queue on the stack to hold the packets.
This interface is fully asynchronous. On error, htc_send_pkts will
call the registered Endpoint callback to cleanup the packet.
@example:
@see also: htc_flush_endpoint
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
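A stack-queue sketch per the @notes above; pkt1 and pkt2 are caller packets already initialized with SET_HTC_PACKET_INFO_TX() and directed at the same endpoint:

    HTC_PACKET_QUEUE q;

    INIT_HTC_PACKET_QUEUE(&q);
    HTC_PACKET_ENQUEUE(&q, pkt1);
    HTC_PACKET_ENQUEUE(&q, pkt2);
    htc_send_pkts_multiple(htc_handle, &q);
    /* on return the local queue has been drained into HTC's TX queue */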
@@ -599,16 +604,16 @@ A_STATUS htc_send_pkts_multiple(HTC_HANDLE HTCHandle,
@desc: Add multiple receive packets to HTC
@function name: htc_add_receive_pkt_multiple
@input: HTCHandle - HTC handle
pPktQueue - HTC receive packet queue holding packets to add
pPktQueue - HTC receive packet queue holding packets to add
@output:
@return: A_OK on success
@notes: user must supply HTC packets for capturing incoming HTC frames. The caller
must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
macro. The queue must only contain recv packets for the same endpoint.
Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the recv packet.
This API will remove the packets from the pkt queue and place them into internal
recv packet list.
The caller may allocate the pkt queue on the stack to hold the packets.
must initialize each HTC packet using the SET_HTC_PACKET_INFO_RX_REFILL()
macro. The queue must only contain recv packets for the same endpoint.
Caller supplies a pointer to an HTC_PACKET_QUEUE structure holding the recv packet.
This API will remove the packets from the pkt queue and place them into internal
recv packet list.
The caller may allocate the pkt queue on the stack to hold the packets.
@example:
@see also:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
@@ -619,7 +624,7 @@ A_STATUS htc_add_receive_pkt_multiple(HTC_HANDLE HTCHandle,
@desc: Check if an endpoint is marked active
@function name: htc_is_endpoint_active
@input: HTCHandle - HTC handle
Endpoint - endpoint to check for active state
Endpoint - endpoint to check for active state
@output:
@return: returns true if Endpoint is Active
@notes:
@@ -633,7 +638,7 @@ A_BOOL htc_is_endpoint_active(HTC_HANDLE HTCHandle,
@desc: Get the number of recv buffers currently queued into an HTC endpoint
@function name: htc_get_num_recv_buffers
@input: HTCHandle - HTC handle
Endpoint - endpoint to check
Endpoint - endpoint to check
@output:
@return: returns number of buffers in queue
@notes:
@@ -647,7 +652,7 @@ int htc_get_num_recv_buffers(HTC_HANDLE HTCHandle,
@desc: Set the target failure handling callback in HTC layer
@function name: htc_set_target_failure_callback
@input: HTCHandle - HTC handle
Callback - target failure handling callback
Callback - target failure handling callback
@output:
@return:
@notes:
@@ -668,14 +673,16 @@ A_STATUS HTCWaitForPendingRecv(HTC_HANDLE HTCHandle,
struct ol_ath_htc_stats *ieee80211_ioctl_get_htc_stats(HTC_HANDLE
HTCHandle);
#ifdef HIF_USB
#define HTCReturnReceivePkt(target,p,osbuf) \
A_NETBUF_FREE(osbuf); \
if(p->Status == A_CLONE) { \
cdf_mem_free(p); \
}
#ifdef HIF_USB
#define HTCReturnReceivePkt(target, p, osbuf) \
do { \
A_NETBUF_FREE(osbuf); \
if (p->Status == A_CLONE) { \
qdf_mem_free(p); \
} \
} while (0)
#else
#define HTCReturnReceivePkt(target,p,osbuf) htc_add_receive_pkt(target,p)
#define HTCReturnReceivePkt(target, p, osbuf) htc_add_receive_pkt(target, p)
#endif
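The do { ... } while (0) wrapper introduced above is the standard guard that makes a multi-statement macro expand to a single statement; a generic illustration (FREE_PAIR_* are made-up names):

    #include <stdlib.h>

    #define FREE_PAIR_BAD(a, b)  free(a); free(b)    /* unguarded */
    #define FREE_PAIR_OK(a, b)   do { free(a); free(b); } while (0)

    if (cond)
        FREE_PAIR_BAD(x, y);  /* bug: free(y) runs unconditionally,
                               * and a following else won't parse */
    if (cond)
        FREE_PAIR_OK(x, y);   /* both frees stay under the if */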
#ifdef WLAN_FEATURE_FASTPATH
@@ -708,9 +715,9 @@ void htc_vote_link_down(HTC_HANDLE HTCHandle);
void htc_vote_link_up(HTC_HANDLE HTCHandle);
#ifdef IPA_OFFLOAD
void htc_ipa_get_ce_resource(HTC_HANDLE htc_handle,
cdf_dma_addr_t *ce_sr_base_paddr,
qdf_dma_addr_t *ce_sr_base_paddr,
uint32_t *ce_sr_ring_size,
cdf_dma_addr_t *ce_reg_paddr);
qdf_dma_addr_t *ce_reg_paddr);
#else
#define htc_ipa_get_ce_resource(htc_handle, \
ce_sr_base_paddr, \

View file

@@ -30,7 +30,7 @@
#define ATH_MODULE_NAME htc
#include "a_debug.h"
#include "cdf_trace.h"
#include "qdf_trace.h"
/* ------- Debug related stuff ------- */
@@ -40,11 +40,11 @@
#define ATH_DEBUG_DUMP ATH_DEBUG_MAKE_MODULE_MASK(3)
#define ATH_DEBUG_SETUP ATH_DEBUG_MAKE_MODULE_MASK(4)
#define HTC_ERROR(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_ERROR, ## args)
QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_ERROR, ## args)
#define HTC_WARN(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_WARN, ## args)
QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_WARN, ## args)
#define HTC_INFO(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_INFO, ## args)
QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_INFO, ## args)
#define HTC_TRACE(args ...) \
CDF_TRACE(CDF_MODULE_ID_HTC, CDF_TRACE_LEVEL_DEBUG, ## args)
QDF_TRACE(QDF_MODULE_ID_HTC, QDF_TRACE_LEVEL_DEBUG, ## args)
#endif /*HTC_DEBUG_H_ */

View file

@@ -36,10 +36,10 @@ extern "C" {
#include "a_types.h"
#include "osapi_linux.h"
#include <cdf_nbuf.h>
#include <cdf_types.h>
#include <cdf_lock.h>
#include <cdf_softirq_timer.h>
#include <cdf_atomic.h>
#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_timer.h>
#include <qdf_atomic.h>
#include "hif.h"
#include <htc.h>
#include "htc_api.h"
@@ -105,23 +105,23 @@ typedef struct _HTC_ENDPOINT {
HTC_EP_CALLBACKS EpCallBacks; /* callbacks associated with this endpoint */
HTC_PACKET_QUEUE TxQueue; /* HTC frame buffer TX queue */
int MaxTxQueueDepth; /* max depth of the TX queue before we need to
call driver's full handler */
call driver's full handler */
int MaxMsgLength; /* max length of endpoint message */
uint8_t UL_PipeID;
uint8_t DL_PipeID;
int ul_is_polled; /* Need to call HIF to get tx completion callbacks? */
cdf_softirq_timer_t ul_poll_timer;
qdf_timer_t ul_poll_timer;
int ul_poll_timer_active;
int ul_outstanding_cnt;
int dl_is_polled; /* Need to call HIF to fetch rx? (Not currently supported.) */
#if 0 /* not currently supported */
cdf_softirq_timer_t dl_poll_timer;
qdf_timer_t dl_poll_timer;
#endif
HTC_PACKET_QUEUE TxLookupQueue; /* lookup queue to match netbufs to htc packets */
HTC_PACKET_QUEUE RxBufferHoldQueue; /* temporary hold queue for back compatibility */
A_UINT8 SeqNo; /* TX seq no (helpful) for debugging */
cdf_atomic_t TxProcessCount; /* serialization */
qdf_atomic_t TxProcessCount; /* serialization */
struct _HTC_TARGET *target;
int TxCredits; /* TX credits available on this endpoint */
int TxCreditSize; /* size in bytes of each credit (set by HTC) */
@@ -155,10 +155,10 @@ enum ol_ath_htc_pkt_ecodes {
typedef struct _HTC_TARGET {
struct hif_opaque_softc *hif_dev;
HTC_ENDPOINT endpoint[ENDPOINT_MAX];
cdf_spinlock_t HTCLock;
cdf_spinlock_t HTCRxLock;
cdf_spinlock_t HTCTxLock;
cdf_spinlock_t HTCCreditLock;
qdf_spinlock_t HTCLock;
qdf_spinlock_t HTCRxLock;
qdf_spinlock_t HTCTxLock;
qdf_spinlock_t HTCCreditLock;
A_UINT32 HTCStateFlags;
void *host_handle;
HTC_INIT_INFO HTCInitInfo;
@@ -166,7 +166,7 @@ typedef struct _HTC_TARGET {
HTC_PACKET_QUEUE ControlBufferTXFreeList;
A_UINT8 CtrlResponseBuffer[HTC_MAX_CONTROL_MESSAGE_LENGTH];
int CtrlResponseLength;
cdf_event_t ctrl_response_valid;
qdf_event_t ctrl_response_valid;
A_BOOL CtrlResponseProcessing;
int TotalTransmitCredits;
HTC_SERVICE_TX_CREDIT_ALLOCATION
@@ -178,13 +178,13 @@ typedef struct _HTC_TARGET {
A_UINT32 CurRxSgTotalLen; /* current total length */
A_UINT32 ExpRxSgTotalLen; /* expected total length */
#endif
cdf_device_t osdev;
qdf_device_t osdev;
struct ol_ath_htc_stats htc_pkt_stats;
HTC_PACKET *pBundleFreeList;
A_UINT32 ce_send_cnt;
A_UINT32 TX_comp_cnt;
A_UINT8 MaxMsgsPerHTCBundle;
cdf_work_t queue_kicker;
qdf_work_t queue_kicker;
} HTC_TARGET;
#define HTC_ENABLE_BUNDLE(target) (target->MaxMsgsPerHTCBundle > 1)
@@ -197,14 +197,14 @@ typedef struct _HTC_TARGET {
#define HTC_STATE_STOPPING (1 << 0)
#define HTC_STOPPING(t) ((t)->HTCStateFlags & HTC_STATE_STOPPING)
#define LOCK_HTC(t) cdf_spin_lock_bh(&(t)->HTCLock);
#define UNLOCK_HTC(t) cdf_spin_unlock_bh(&(t)->HTCLock);
#define LOCK_HTC_RX(t) cdf_spin_lock_bh(&(t)->HTCRxLock);
#define UNLOCK_HTC_RX(t) cdf_spin_unlock_bh(&(t)->HTCRxLock);
#define LOCK_HTC_TX(t) cdf_spin_lock_bh(&(t)->HTCTxLock);
#define UNLOCK_HTC_TX(t) cdf_spin_unlock_bh(&(t)->HTCTxLock);
#define LOCK_HTC_CREDIT(t) cdf_spin_lock_bh(&(t)->HTCCreditLock);
#define UNLOCK_HTC_CREDIT(t) cdf_spin_unlock_bh(&(t)->HTCCreditLock);
#define LOCK_HTC(t) qdf_spin_lock_bh(&(t)->HTCLock);
#define UNLOCK_HTC(t) qdf_spin_unlock_bh(&(t)->HTCLock);
#define LOCK_HTC_RX(t) qdf_spin_lock_bh(&(t)->HTCRxLock);
#define UNLOCK_HTC_RX(t) qdf_spin_unlock_bh(&(t)->HTCRxLock);
#define LOCK_HTC_TX(t) qdf_spin_lock_bh(&(t)->HTCTxLock);
#define UNLOCK_HTC_TX(t) qdf_spin_unlock_bh(&(t)->HTCTxLock);
#define LOCK_HTC_CREDIT(t) qdf_spin_lock_bh(&(t)->HTCCreditLock);
#define UNLOCK_HTC_CREDIT(t) qdf_spin_unlock_bh(&(t)->HTCCreditLock);
#define GET_HTC_TARGET_FROM_HANDLE(hnd) ((HTC_TARGET *)(hnd))
@@ -213,17 +213,23 @@ typedef struct _HTC_TARGET {
#define HTC_POLL_CLEANUP_PERIOD_MS 10 /* milliseconds */
/* Macro to Increment the HTC_PACKET_ERRORS for Tx.*/
#define OL_ATH_HTC_PKT_ERROR_COUNT_INCR(_target,_ecode) \
#define OL_ATH_HTC_PKT_ERROR_COUNT_INCR(_target, _ecode) \
do { \
if(_ecode==GET_HTC_PKT_Q_FAIL) (_target->htc_pkt_stats.htc_get_pkt_q_fail_count)+=1; \
if(_ecode==HTC_PKT_Q_EMPTY) (_target->htc_pkt_stats.htc_pkt_q_empty_count)+=1; \
if(_ecode==HTC_SEND_Q_EMPTY) (_target->htc_pkt_stats.htc_send_q_empty_count)+=1; \
} while(0);
		if (_ecode == GET_HTC_PKT_Q_FAIL) \
			(_target->htc_pkt_stats.htc_get_pkt_q_fail_count) += 1; \
		if (_ecode == HTC_PKT_Q_EMPTY) \
			(_target->htc_pkt_stats.htc_pkt_q_empty_count) += 1; \
		if (_ecode == HTC_SEND_Q_EMPTY) \
			(_target->htc_pkt_stats.htc_send_q_empty_count) += 1; \
	} while (0)
/* internal HTC functions */
CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
QDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
uint8_t pipeID);
CDF_STATUS htc_tx_completion_handler(void *Context, cdf_nbuf_t netbuf,
QDF_STATUS htc_tx_completion_handler(void *Context, cdf_nbuf_t netbuf,
unsigned int transferID, uint32_t toeplitz_hash_result);
HTC_PACKET *allocate_htc_bundle_packet(HTC_TARGET *target);
@@ -244,7 +250,7 @@ void htc_control_rx_complete(void *Context, HTC_PACKET *pPacket);
void htc_process_credit_rpt(HTC_TARGET *target,
HTC_CREDIT_REPORT *pRpt,
int NumEntries, HTC_ENDPOINT_ID FromEndpoint);
void htc_fw_event_handler(void *context, CDF_STATUS status);
void htc_fw_event_handler(void *context, QDF_STATUS status);
void htc_send_complete_check_cleanup(void *context);
void htc_runtime_pm_init(HTC_TARGET *target);
void htc_kick_queues(void *context);
@@ -256,7 +262,7 @@ static inline void htc_send_complete_poll_timer_stop(HTC_ENDPOINT *
pEndpoint) {
LOCK_HTC_TX(pEndpoint->target);
if (pEndpoint->ul_poll_timer_active) {
/* cdf_softirq_timer_cancel(&pEndpoint->ul_poll_timer); */
/* qdf_timer_stop(&pEndpoint->ul_poll_timer); */
pEndpoint->ul_poll_timer_active = 0;
}
UNLOCK_HTC_TX(pEndpoint->target);
@@ -268,7 +274,7 @@ static inline void htc_send_complete_poll_timer_start(HTC_ENDPOINT *
if (pEndpoint->ul_outstanding_cnt
&& !pEndpoint->ul_poll_timer_active) {
/*
cdf_softirq_timer_start(
qdf_timer_start(
&pEndpoint->ul_poll_timer, HTC_POLL_CLEANUP_PERIOD_MS);
*/
pEndpoint->ul_poll_timer_active = 1;

View file

@@ -95,11 +95,11 @@ typedef struct _HTC_PACKET {
void *pPktContext; /* caller's per packet specific context */
A_UINT8 *pBufferStart; /* the true buffer start, the caller can
store the real buffer start here. In
receive callbacks, the HTC layer sets pBuffer
to the start of the payload past the header. This
field allows the caller to reset pBuffer when it
recycles receive packets back to HTC */
store the real buffer start here. In
receive callbacks, the HTC layer sets pBuffer
to the start of the payload past the header. This
field allows the caller to reset pBuffer when it
recycles receive packets back to HTC */
/*
* Pointer to the start of the buffer. In the transmit
* direction this points to the start of the payload. In the
@@ -122,40 +122,40 @@ typedef struct _HTC_PACKET {
HTC_PACKET_COMPLETION Completion; /* completion */
void *pContext; /* HTC private completion context */
void *pNetBufContext; /* optimization for network-oriented data, the HTC packet
can pass the network buffer corresponding to the HTC packet;
	lower layers may optimize the transfer knowing this is
	a network buffer */
	can pass the network buffer corresponding to the HTC packet;
	lower layers may optimize the transfer knowing this is
	a network buffer */
} HTC_PACKET;
#define COMPLETE_HTC_PACKET(p,status) \
#define COMPLETE_HTC_PACKET(p, status) \
{ \
(p)->Status = (status); \
(p)->Completion((p)->pContext,(p)); \
(p)->Completion((p)->pContext, (p)); \
}
#define INIT_HTC_PACKET_INFO(p,b,len) \
#define INIT_HTC_PACKET_INFO(p, b, len) \
{ \
(p)->pBufferStart = (b); \
(p)->BufferLength = (len); \
}
/* macro to set an initial RX packet for refilling HTC */
#define SET_HTC_PACKET_INFO_RX_REFILL(p,c,b,len,ep) \
{ \
#define SET_HTC_PACKET_INFO_RX_REFILL(p, c, b, len, ep) \
do { \
(p)->pPktContext = (c); \
(p)->pBuffer = (b); \
(p)->pBufferStart = (b); \
(p)->BufferLength = (len); \
(p)->Endpoint = (ep); \
}
} while (0)
/* fast macro to recycle an RX packet that will be re-queued to HTC */
#define HTC_PACKET_RESET_RX(p) \
{ (p)->pBuffer = (p)->pBufferStart; (p)->ActualLength = 0; }
/* macro to set packet parameters for TX */
#define SET_HTC_PACKET_INFO_TX(p,c,b,len,ep,tag) \
{ \
#define SET_HTC_PACKET_INFO_TX(p, c, b, len, ep, tag) \
do { \
(p)->pPktContext = (c); \
(p)->pBuffer = (b); \
(p)->ActualLength = (len); \
@@ -163,10 +163,12 @@ typedef struct _HTC_PACKET {
(p)->PktInfo.AsTx.Tag = (tag); \
(p)->PktInfo.AsTx.Flags = 0; \
(p)->PktInfo.AsTx.SendFlags = 0; \
}
} while (0)
#define SET_HTC_PACKET_NET_BUF_CONTEXT(p,nb) \
(p)->pNetBufContext = (nb)
#define SET_HTC_PACKET_NET_BUF_CONTEXT(p, nb) \
do { \
(p)->pNetBufContext = (nb); \
} while (0)
#define GET_HTC_PACKET_NET_BUF_CONTEXT(p) (p)->pNetBufContext
@@ -179,25 +181,25 @@ typedef struct _HTC_PACKET_QUEUE {
/* initialize queue */
#define INIT_HTC_PACKET_QUEUE(pQ) \
{ \
DL_LIST_INIT(& (pQ)->QueueHead); \
DL_LIST_INIT(&(pQ)->QueueHead); \
(pQ)->Depth = 0; \
}
/* enqueue HTC packet to the tail of the queue */
#define HTC_PACKET_ENQUEUE(pQ,p) \
{ dl_list_insert_tail(& (pQ)->QueueHead,& (p)->ListLink); \
(pQ)->Depth ++; \
#define HTC_PACKET_ENQUEUE(pQ, p) \
{ dl_list_insert_tail(&(pQ)->QueueHead, &(p)->ListLink); \
(pQ)->Depth++; \
}
/* enqueue HTC packet to the tail of the queue */
#define HTC_PACKET_ENQUEUE_TO_HEAD(pQ,p) \
{ dl_list_insert_head(& (pQ)->QueueHead,& (p)->ListLink); \
(pQ)->Depth ++; \
#define HTC_PACKET_ENQUEUE_TO_HEAD(pQ, p) \
{ dl_list_insert_head(&(pQ)->QueueHead, &(p)->ListLink); \
(pQ)->Depth++; \
}
/* test if a queue is empty */
#define HTC_QUEUE_EMPTY(pQ) ((pQ)->Depth == 0)
/* get packet at head without removing it */
static INLINE HTC_PACKET *htc_get_pkt_at_head(HTC_PACKET_QUEUE *queue)
static inline HTC_PACKET *htc_get_pkt_at_head(HTC_PACKET_QUEUE *queue)
{
if (queue->Depth == 0) {
return NULL;
@@ -208,14 +210,14 @@ static INLINE HTC_PACKET *htc_get_pkt_at_head(HTC_PACKET_QUEUE *queue)
}
/* remove a packet from a queue, where-ever it is in the queue */
#define HTC_PACKET_REMOVE(pQ,p) \
#define HTC_PACKET_REMOVE(pQ, p) \
{ \
dl_list_remove(& (p)->ListLink); \
(pQ)->Depth --; \
dl_list_remove(&(p)->ListLink); \
(pQ)->Depth--; \
}
/* dequeue an HTC packet from the head of the queue */
static INLINE HTC_PACKET *htc_packet_dequeue(HTC_PACKET_QUEUE *queue)
static inline HTC_PACKET *htc_packet_dequeue(HTC_PACKET_QUEUE *queue)
{
DL_LIST *pItem = dl_list_remove_item_from_head(&queue->QueueHead);
if (pItem != NULL) {
@@ -226,7 +228,7 @@ static INLINE HTC_PACKET *htc_packet_dequeue(HTC_PACKET_QUEUE *queue)
}
/* dequeue an HTC packet from the tail of the queue */
static INLINE HTC_PACKET *htc_packet_dequeue_tail(HTC_PACKET_QUEUE *queue)
static inline HTC_PACKET *htc_packet_dequeue_tail(HTC_PACKET_QUEUE *queue)
{
DL_LIST *pItem = dl_list_remove_item_from_tail(&queue->QueueHead);
if (pItem != NULL) {
@@ -242,9 +244,9 @@ static INLINE HTC_PACKET *htc_packet_dequeue_tail(HTC_PACKET_QUEUE *queue)
#define HTC_GET_TAG_FROM_PKT(p) (p)->PktInfo.AsTx.Tag
/* transfer the packets from one queue to the tail of another queue */
#define HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(pQDest,pQSrc) \
#define HTC_PACKET_QUEUE_TRANSFER_TO_TAIL(pQDest, pQSrc) \
{ \
dl_list_transfer_items_to_tail(&(pQDest)->QueueHead,&(pQSrc)->QueueHead); \
dl_list_transfer_items_to_tail(&(pQDest)->QueueHead, &(pQSrc)->QueueHead); \
(pQDest)->Depth += (pQSrc)->Depth; \
(pQSrc)->Depth = 0; \
}
@@ -257,20 +259,20 @@ static INLINE HTC_PACKET *htc_packet_dequeue_tail(HTC_PACKET_QUEUE *queue)
*/
#define HTC_PACKET_QUEUE_TRANSFER_TO_HEAD(pQDest, pQSrc) \
{ \
dl_list_transfer_items_to_head(&(pQDest)->QueueHead,&(pQSrc)->QueueHead); \
dl_list_transfer_items_to_head(&(pQDest)->QueueHead, &(pQSrc)->QueueHead); \
(pQDest)->Depth += (pQSrc)->Depth; \
(pQSrc)->Depth = 0; \
}
/* fast version to init and add a single packet to a queue */
#define INIT_HTC_PACKET_QUEUE_AND_ADD(pQ,pP) \
#define INIT_HTC_PACKET_QUEUE_AND_ADD(pQ, pP) \
{ \
DL_LIST_INIT_AND_ADD(&(pQ)->QueueHead,&(pP)->ListLink) \
DL_LIST_INIT_AND_ADD(&(pQ)->QueueHead, &(pP)->ListLink) \
(pQ)->Depth = 1; \
}
#define HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(pQ, pPTemp) \
ITERATE_OVER_LIST_ALLOW_REMOVE(&(pQ)->QueueHead,(pPTemp), HTC_PACKET, ListLink)
ITERATE_OVER_LIST_ALLOW_REMOVE(&(pQ)->QueueHead, (pPTemp), HTC_PACKET, ListLink)
#define HTC_PACKET_QUEUE_ITERATE_IS_VALID(pQ) ITERATE_IS_VALID(&(pQ)->QueueHead)
#define HTC_PACKET_QUEUE_ITERATE_RESET(pQ) ITERATE_RESET(&(pQ)->QueueHead)
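A small drain sketch using the helpers above; q is an initialized, caller-owned HTC_PACKET_QUEUE, and A_ECANCELED is assumed to be available from athdefs.h:

    HTC_PACKET *pkt;

    while ((pkt = htc_packet_dequeue(&q)) != NULL)
        COMPLETE_HTC_PACKET(pkt, A_ECANCELED);  /* caller-side flush */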

View file

@@ -240,7 +240,7 @@ cdf_nbuf_t rx_sg_to_single_netbuf(HTC_TARGET *target)
skb = cdf_nbuf_queue_remove(rx_sg_queue);
do {
cdf_nbuf_peek_header(skb, &anbdata, &anblen);
cdf_mem_copy(anbdata_new, anbdata, cdf_nbuf_len(skb));
qdf_mem_copy(anbdata_new, anbdata, cdf_nbuf_len(skb));
cdf_nbuf_put_tail(new_skb, cdf_nbuf_len(skb));
anbdata_new += cdf_nbuf_len(skb);
cdf_nbuf_free(skb);
@@ -261,10 +261,10 @@ _failed:
}
#endif
CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
QDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
uint8_t pipeID)
{
CDF_STATUS status = CDF_STATUS_SUCCESS;
QDF_STATUS status = QDF_STATUS_SUCCESS;
HTC_FRAME_HDR *HtcHdr;
HTC_TARGET *target = (HTC_TARGET *) Context;
uint8_t *netdata;
@@ -310,8 +310,8 @@ CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
htc_ep_id));
debug_dump_bytes((A_UINT8 *) HtcHdr,
sizeof(HTC_FRAME_HDR), "BAD HTC Header");
status = CDF_STATUS_E_FAILURE;
CDF_BUG(0);
status = QDF_STATUS_E_FAILURE;
QDF_BUG(0);
break;
}
@@ -347,8 +347,8 @@ CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
debug_dump_bytes((A_UINT8 *) HtcHdr,
sizeof(HTC_FRAME_HDR),
"BAD RX packet length");
status = CDF_STATUS_E_FAILURE;
CDF_BUG(0);
status = QDF_STATUS_E_FAILURE;
QDF_BUG(0);
break;
#endif
}
@@ -374,7 +374,7 @@ CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("htc_rx_completion_handler, invalid header (payloadlength should be :%d, CB[0] is:%d)\n",
payloadLen, temp));
status = CDF_STATUS_E_INVAL;
status = QDF_STATUS_E_INVAL;
break;
}
@@ -386,7 +386,7 @@ CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
payloadLen - temp),
temp, htc_ep_id);
if (A_FAILED(temp_status)) {
status = CDF_STATUS_E_FAILURE;
status = QDF_STATUS_E_FAILURE;
break;
}
@@ -420,8 +420,8 @@ CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
* on the endpoint 0 */
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("HTC Rx Ctrl still processing\n"));
status = CDF_STATUS_E_FAILURE;
CDF_BUG(false);
status = QDF_STATUS_E_FAILURE;
QDF_BUG(false);
break;
}
@@ -436,7 +436,7 @@ CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
target->CtrlResponseProcessing = true;
UNLOCK_HTC_RX(target);
cdf_event_set(&target->ctrl_response_valid);
qdf_event_set(&target->ctrl_response_valid);
break;
case HTC_MSG_SEND_SUSPEND_COMPLETE:
wow_nack = 0;
@@ -476,10 +476,10 @@ CDF_STATUS htc_rx_completion_handler(void *Context, cdf_nbuf_t netbuf,
* TODO_FIXME */
pPacket = allocate_htc_packet_container(target);
if (NULL == pPacket) {
status = CDF_STATUS_E_RESOURCES;
status = QDF_STATUS_E_RESOURCES;
break;
}
pPacket->Status = CDF_STATUS_SUCCESS;
pPacket->Status = QDF_STATUS_SUCCESS;
pPacket->Endpoint = htc_ep_id;
pPacket->pPktContext = netbuf;
pPacket->pBuffer = cdf_nbuf_data(netbuf) + HTC_HDR_LENGTH;
@@ -601,7 +601,7 @@ void htc_flush_rx_hold_queue(HTC_TARGET *target, HTC_ENDPOINT *pEndpoint)
void htc_recv_init(HTC_TARGET *target)
{
/* Initialize ctrl_response_valid to block */
cdf_event_init(&target->ctrl_response_valid);
qdf_event_create(&target->ctrl_response_valid);
}
/* polling routine to wait for a control packet to be received */
@@ -612,9 +612,9 @@ A_STATUS htc_wait_recv_ctrl_message(HTC_TARGET *target)
AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("+HTCWaitCtrlMessageRecv\n"));
/* Wait for BMI request/response transaction to complete */
if (cdf_wait_single_event(&target->ctrl_response_valid,
cdf_system_msecs_to_ticks(HTC_CONTROL_RX_TIMEOUT))) {
CDF_BUG(0);
if (qdf_wait_single_event(&target->ctrl_response_valid,
qdf_system_msecs_to_ticks(HTC_CONTROL_RX_TIMEOUT))) {
QDF_BUG(0);
return A_ERROR;
}
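The cdf→qdf event rename in these hunks follows a create/signal/wait pattern; condensed from the lines above:

    /* htc_recv_init(): create the event in the blocked state */
    qdf_event_create(&target->ctrl_response_valid);

    /* htc_rx_completion_handler(): signal when the response arrives */
    qdf_event_set(&target->ctrl_response_valid);

    /* htc_wait_recv_ctrl_message(): block until signaled or timeout */
    if (qdf_wait_single_event(&target->ctrl_response_valid,
                qdf_system_msecs_to_ticks(HTC_CONTROL_RX_TIMEOUT)))
        return A_ERROR;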

View file

@@ -28,7 +28,7 @@
#include "htc_debug.h"
#include "htc_internal.h"
#include <cdf_nbuf.h> /* cdf_nbuf_t */
#include <cdf_memory.h> /* cdf_mem_malloc */
#include <qdf_mem.h> /* qdf_mem_malloc */
#include "epping_main.h"
/* #define USB_HIF_SINGLE_PIPE_DATA_SCHED */
@@ -80,7 +80,7 @@ void htc_credit_record(htc_credit_exchange_type type, uint32_t tx_credit,
htc_credit_history_buffer[g_htc_credit_history_idx].type = type;
htc_credit_history_buffer[g_htc_credit_history_idx].time =
cdf_get_log_timestamp();
qdf_get_log_timestamp();
htc_credit_history_buffer[g_htc_credit_history_idx].tx_credit =
tx_credit;
htc_credit_history_buffer[g_htc_credit_history_idx].htc_tx_queue_depth =
@@ -130,11 +130,11 @@ void htc_get_control_endpoint_tx_host_credits(HTC_HANDLE HTCHandle, int *credits
UNLOCK_HTC_TX(target);
}
static INLINE void restore_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket)
static inline void restore_tx_packet(HTC_TARGET *target, HTC_PACKET *pPacket)
{
if (pPacket->PktInfo.AsTx.Flags & HTC_TX_PACKET_FLAG_FIXUP_NETBUF) {
cdf_nbuf_t netbuf = GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket);
cdf_nbuf_unmap(target->osdev, netbuf, CDF_DMA_TO_DEVICE);
cdf_nbuf_unmap(target->osdev, netbuf, QDF_DMA_TO_DEVICE);
cdf_nbuf_pull_head(netbuf, sizeof(HTC_FRAME_HDR));
pPacket->PktInfo.AsTx.Flags &= ~HTC_TX_PACKET_FLAG_FIXUP_NETBUF;
}
@@ -216,17 +216,17 @@ HTC_PACKET *allocate_htc_bundle_packet(HTC_TARGET *target)
if (!netbuf) {
return NULL;
}
pPacket = cdf_mem_malloc(sizeof(HTC_PACKET));
pPacket = qdf_mem_malloc(sizeof(HTC_PACKET));
AR_DEBUG_ASSERT(pPacket);
if (!pPacket) {
cdf_nbuf_free(netbuf);
return NULL;
}
pQueueSave = cdf_mem_malloc(sizeof(HTC_PACKET_QUEUE));
pQueueSave = qdf_mem_malloc(sizeof(HTC_PACKET_QUEUE));
AR_DEBUG_ASSERT(pQueueSave);
if (!pQueueSave) {
cdf_nbuf_free(netbuf);
cdf_mem_free(pPacket);
qdf_mem_free(pPacket);
return NULL;
}
INIT_HTC_PACKET_QUEUE(pQueueSave);
@@ -305,7 +305,7 @@ static A_STATUS htc_send_bundled_netbuf(HTC_TARGET *target,
unsigned char *pBundleBuffer,
HTC_PACKET *pPacketTx)
{
cdf_size_t data_len;
qdf_size_t data_len;
A_STATUS status;
cdf_nbuf_t bundleBuf;
uint32_t data_attr = 0;
@@ -322,7 +322,7 @@ static A_STATUS htc_send_bundled_netbuf(HTC_TARGET *target,
HTC_PACKET_ENQUEUE(&pEndpoint->TxLookupQueue, pPacketTx);
UNLOCK_HTC_TX(target);
#if DEBUG_BUNDLE
cdf_print(" Send bundle EP%d buffer size:0x%x, total:0x%x, count:%d.\n",
qdf_print(" Send bundle EP%d buffer size:0x%x, total:0x%x, count:%d.\n",
pEndpoint->Id,
pEndpoint->TxCreditSize,
data_len, data_len / pEndpoint->TxCreditSize);
@@ -331,7 +331,7 @@ static A_STATUS htc_send_bundled_netbuf(HTC_TARGET *target,
pEndpoint->UL_PipeID,
pEndpoint->Id, data_len, bundleBuf, data_attr);
if (status != A_OK) {
cdf_print("%s:hif_send_head failed(len=%d).\n", __FUNCTION__,
qdf_print("%s:hif_send_head failed(len=%d).\n", __FUNCTION__,
data_len);
}
return status;
@@ -522,7 +522,7 @@ static A_STATUS htc_issue_packets(HTC_TARGET *target,
Flags & HTC_TX_PACKET_FLAG_FIXUP_NETBUF) {
cdf_nbuf_map(target->osdev,
GET_HTC_PACKET_NET_BUF_CONTEXT
(pPacket), CDF_DMA_TO_DEVICE);
(pPacket), QDF_DMA_TO_DEVICE);
}
}
LOCK_HTC_TX(target);
@@ -538,7 +538,7 @@ static A_STATUS htc_issue_packets(HTC_TARGET *target,
HTC_HDR_LENGTH + pPacket->ActualLength,
netbuf, data_attr);
#if DEBUG_BUNDLE
cdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.\n",
qdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.\n",
pEndpoint->Id,
pEndpoint->TxCreditSize,
HTC_HDR_LENGTH + pPacket->ActualLength);
@@ -546,7 +546,7 @@ static A_STATUS htc_issue_packets(HTC_TARGET *target,
target->ce_send_cnt++;
if (cdf_unlikely(A_FAILED(status))) {
if (qdf_unlikely(A_FAILED(status))) {
if (status != A_NO_RESOURCE) {
/* TODO : if more than 1 endpoint maps to the same PipeID it is possible
* to run out of resources in the HIF layer. Don't emit the error */
@@ -585,7 +585,7 @@ static A_STATUS htc_issue_packets(HTC_TARGET *target,
if (pPacket->PktInfo.AsTx.Tag == HTC_TX_PACKET_TAG_RUNTIME_PUT)
hif_pm_runtime_put(target->hif_dev);
}
if (cdf_unlikely(A_FAILED(status))) {
if (qdf_unlikely(A_FAILED(status))) {
#if defined(HIF_USB)
if (pEndpoint->Id >= ENDPOINT_2 && pEndpoint->Id <= ENDPOINT_5)
target->avail_tx_credits +=
@@ -688,7 +688,7 @@ void get_htc_send_packets_credit_based(HTC_TARGET *target,
while (true) {
if (do_pm_get && hif_pm_runtime_get(target->hif_dev)) {
/* bus suspended, runtime resume issued */
CDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0);
QDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0);
break;
}
@@ -840,7 +840,7 @@ void get_htc_send_packets(HTC_TARGET *target,
if (do_pm_get && hif_pm_runtime_get(target->hif_dev)) {
/* bus suspended, runtime resume issued */
CDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0);
QDF_ASSERT(HTC_PACKET_QUEUE_DEPTH(pQueue) == 0);
break;
}
@@ -878,7 +878,7 @@ void get_htc_send_packets(HTC_TARGET *target,
num_frags =
(pPacket->PktInfo.AsTx.
Flags & HTC_TX_PACKET_FLAG_FIXUP_NETBUF) ? 1
/* WMI messages are in a single-fragment network buffer */ :
/* WMI messages are in a single-fragment network buffer */ :
cdf_nbuf_get_num_frags(GET_HTC_PACKET_NET_BUF_CONTEXT
(pPacket));
Resources -= num_frags;
@@ -1043,11 +1043,11 @@ static HTC_SEND_QUEUE_RESULT htc_try_send(HTC_TARGET *target,
}
/* increment tx processing count on entry */
cdf_atomic_inc(&pEndpoint->TxProcessCount);
if (cdf_atomic_read(&pEndpoint->TxProcessCount) > 1) {
qdf_atomic_inc(&pEndpoint->TxProcessCount);
if (qdf_atomic_read(&pEndpoint->TxProcessCount) > 1) {
/* another thread or task is draining the TX queues on this endpoint
* that thread will reset the tx processing count when the queue is drained */
cdf_atomic_dec(&pEndpoint->TxProcessCount);
qdf_atomic_dec(&pEndpoint->TxProcessCount);
UNLOCK_HTC_TX(target);
AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send (busy) \n"));
return HTC_SEND_QUEUE_OK;
@@ -1125,7 +1125,7 @@ static HTC_SEND_QUEUE_RESULT htc_try_send(HTC_TARGET *target,
UNLOCK_HTC_TX(target);
/* done with this endpoint, we can clear the count */
cdf_atomic_init(&pEndpoint->TxProcessCount);
qdf_atomic_init(&pEndpoint->TxProcessCount);
AR_DEBUG_PRINTF(ATH_DEBUG_SEND, ("-htc_try_send: \n"));
@@ -1157,15 +1157,15 @@ static A_UINT16 htc_send_pkts_sched_check(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID
switch (id) {
case ENDPOINT_2: /* BE */
return (acQueueStatus[0] && acQueueStatus[2]
&& acQueueStatus[3]);
return acQueueStatus[0] && acQueueStatus[2]
&& acQueueStatus[3];
case ENDPOINT_3: /* BK */
return (acQueueStatus[0] && acQueueStatus[1] && acQueueStatus[2]
&& acQueueStatus[3]);
return acQueueStatus[0] && acQueueStatus[1] && acQueueStatus[2]
&& acQueueStatus[3];
case ENDPOINT_4: /* VI */
return (acQueueStatus[2] && acQueueStatus[3]);
return acQueueStatus[2] && acQueueStatus[3];
case ENDPOINT_5: /* VO */
return (acQueueStatus[3]);
return acQueueStatus[3];
default:
return 0;
}
@@ -1291,7 +1291,7 @@ A_STATUS htc_send_pkts_multiple(HTC_HANDLE HTCHandle, HTC_PACKET_QUEUE *pPktQueu
*/
cdf_nbuf_map(target->osdev,
GET_HTC_PACKET_NET_BUF_CONTEXT(pPacket),
CDF_DMA_TO_DEVICE);
QDF_DMA_TO_DEVICE);
pPacket->PktInfo.AsTx.Flags |= HTC_TX_PACKET_FLAG_FIXUP_NETBUF;
}
@@ -1410,7 +1410,7 @@ A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, cdf_nbuf_t netbuf, int Epid,
pEndpoint->SeqNo++;
NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_HTC);
DPTRACE(cdf_dp_trace(netbuf, CDF_DP_TRACE_HTC_PACKET_PTR_RECORD,
DPTRACE(qdf_dp_trace(netbuf, QDF_DP_TRACE_HTC_PACKET_PTR_RECORD,
(uint8_t *)(cdf_nbuf_data(netbuf)),
sizeof(cdf_nbuf_data(netbuf))));
status = hif_send_head(target->hif_dev,
@@ -1492,14 +1492,14 @@ A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket,
}
/* increment tx processing count on entry */
cdf_atomic_inc(&pEndpoint->TxProcessCount);
if (cdf_atomic_read(&pEndpoint->TxProcessCount) > 1) {
qdf_atomic_inc(&pEndpoint->TxProcessCount);
if (qdf_atomic_read(&pEndpoint->TxProcessCount) > 1) {
/*
* Another thread or task is draining the TX queues on this endpoint.
* That thread will reset the tx processing count when the queue is
* drained.
*/
cdf_atomic_dec(&pEndpoint->TxProcessCount);
qdf_atomic_dec(&pEndpoint->TxProcessCount);
UNLOCK_HTC_TX(target);
return A_OK;
}
@@ -1545,7 +1545,7 @@ A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket,
UNLOCK_HTC_TX(target);
}
NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_HTC);
DPTRACE(cdf_dp_trace(netbuf, CDF_DP_TRACE_HTC_PACKET_PTR_RECORD,
DPTRACE(qdf_dp_trace(netbuf, QDF_DP_TRACE_HTC_PACKET_PTR_RECORD,
(uint8_t *)(cdf_nbuf_data(netbuf)),
sizeof(cdf_nbuf_data(netbuf))));
@@ -1579,13 +1579,13 @@ A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket,
HTC_HDR_LENGTH + pPacket->ActualLength,
netbuf, data_attr);
#if DEBUG_BUNDLE
cdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.\n",
qdf_print(" Send single EP%d buffer size:0x%x, total:0x%x.\n",
pEndpoint->Id,
pEndpoint->TxCreditSize,
HTC_HDR_LENGTH + pPacket->ActualLength);
#endif
if (cdf_unlikely(A_FAILED(status))) {
if (qdf_unlikely(A_FAILED(status))) {
LOCK_HTC_TX(target);
pEndpoint->ul_outstanding_cnt--;
/* remove this packet from the tx completion queue */
@@ -1612,7 +1612,7 @@ A_STATUS htc_send_data_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket,
}
}
/* done with this endpoint, we can clear the count */
cdf_atomic_init(&pEndpoint->TxProcessCount);
qdf_atomic_init(&pEndpoint->TxProcessCount);
if (pEndpoint->ul_is_polled) {
/*
@@ -1651,7 +1651,7 @@ static HTC_PACKET *htc_lookup_tx_packet(HTC_TARGET *target,
/* Dequeue first packet directly because of in-order completion */
pPacket = htc_packet_dequeue(&pEndpoint->TxLookupQueue);
if (cdf_unlikely(!pPacket)) {
if (qdf_unlikely(!pPacket)) {
UNLOCK_HTC_TX(target);
return NULL;
}
@@ -1697,7 +1697,7 @@ static HTC_PACKET *htc_lookup_tx_packet(HTC_TARGET *target,
return pFoundPacket;
}
CDF_STATUS htc_tx_completion_handler(void *Context,
QDF_STATUS htc_tx_completion_handler(void *Context,
cdf_nbuf_t netbuf, unsigned int EpID,
uint32_t toeplitz_hash_result)
{
@@ -1705,8 +1705,8 @@ CDF_STATUS htc_tx_completion_handler(void *Context,
HTC_ENDPOINT *pEndpoint;
HTC_PACKET *pPacket;
#ifdef USB_HIF_SINGLE_PIPE_DATA_SCHED
HTC_ENDPOINT_ID eid[DATA_EP_SIZE] =
{ ENDPOINT_5, ENDPOINT_4, ENDPOINT_2, ENDPOINT_3 };
HTC_ENDPOINT_ID eid[DATA_EP_SIZE] = { ENDPOINT_5, ENDPOINT_4,
ENDPOINT_2, ENDPOINT_3 };
int epidIdx;
A_UINT16 resourcesThresh[DATA_EP_SIZE]; /* urb resources */
A_UINT16 resources;
@@ -1739,11 +1739,11 @@ CDF_STATUS htc_tx_completion_handler(void *Context,
}
HTC_PACKET_QUEUE_ITERATE_END;
free_htc_bundle_packet(target, pPacket);
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
/* will be giving this buffer back to upper layers */
netbuf = NULL;
pPacket->Status = CDF_STATUS_SUCCESS;
pPacket->Status = QDF_STATUS_SUCCESS;
send_packet_completion(target, pPacket);
} while (false);
@@ -1755,7 +1755,7 @@ CDF_STATUS htc_tx_completion_handler(void *Context,
htc_try_send(target, pEndpoint, NULL);
}
return CDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
}
/* callback when TX resources become available */
@@ -1928,8 +1928,8 @@ void htc_process_credit_rpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt,
#endif
#if defined(HIF_USB)
if (pEndpoint->Id >= ENDPOINT_2 && pEndpoint->Id <= ENDPOINT_5) {
HTC_ENDPOINT_ID eid[DATA_EP_SIZE] =
{ ENDPOINT_5, ENDPOINT_4, ENDPOINT_2, ENDPOINT_3 };
HTC_ENDPOINT_ID eid[DATA_EP_SIZE] = { ENDPOINT_5,
ENDPOINT_4, ENDPOINT_2, ENDPOINT_3 };
int epid_idx;
target->avail_tx_credits += rpt_credits;
@@ -1998,5 +1998,5 @@ struct ol_ath_htc_stats *ieee80211_ioctl_get_htc_stats(HTC_HANDLE HTCHandle)
{
HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
return (&(target->htc_pkt_stats));
return &(target->htc_pkt_stats);
}

View file

@@ -310,7 +310,7 @@ A_STATUS htc_connect_service(HTC_HANDLE HTCHandle,
pEndpoint->TxCreditsPerMaxMsg++;
}
#if DEBUG_CREDIT
cdf_print(" Endpoint%d initial credit:%d, size:%d.\n",
qdf_print(" Endpoint%d initial credit:%d, size:%d.\n",
pEndpoint->Id, pEndpoint->TxCredits,
pEndpoint->TxCreditSize);
#endif
@@ -328,14 +328,14 @@ A_STATUS htc_connect_service(HTC_HANDLE HTCHandle,
break;
}
cdf_assert(!pEndpoint->dl_is_polled); /* not currently supported */
qdf_assert(!pEndpoint->dl_is_polled); /* not currently supported */
if (pEndpoint->ul_is_polled) {
cdf_softirq_timer_init(target->osdev,
qdf_timer_init(target->osdev,
&pEndpoint->ul_poll_timer,
htc_send_complete_check_cleanup,
pEndpoint,
CDF_TIMER_TYPE_SW);
QDF_TIMER_TYPE_SW);
}
AR_DEBUG_PRINTF(ATH_DEBUG_SETUP,
@@ -372,7 +372,7 @@ void htc_set_credit_distribution(HTC_HANDLE HTCHandle,
}
void htc_fw_event_handler(void *context, CDF_STATUS status)
void htc_fw_event_handler(void *context, QDF_STATUS status)
{
HTC_TARGET *target = (HTC_TARGET *) context;
HTC_INIT_INFO *initInfo = &target->HTCInitInfo;