qcacmn: Add APIs to set/get ce service max yield time

Add APIs to set/get ce service max yield time and update
max time taken in NAPI poll from the driver load time.

Change-Id: Idfd4a271ce13916f188c92ab3af32e1648f48c95
CRs-Fixed: 2055082
This commit is contained in:
Himanshu Agarwal
2017-05-23 11:06:12 +05:30
committed by nshrivas
parent 23668cdf4e
commit d9d0e52555
7 changed files with 88 additions and 12 deletions

View File

@@ -171,6 +171,7 @@ struct qca_napi_stat {
uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
uint32_t time_limit_reached;
uint32_t rxpkt_thresh_reached;
unsigned long long napi_max_poll_time;
};
@@ -920,4 +921,28 @@ ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
/**
* hif_set_ce_service_max_yield_time() - sets CE service max yield time
* @hif: hif context
* @ce_service_max_yield_time: CE service max yield time to set
*
 * This API stores the CE service max yield time in the hif context
 * based on the ini value.
*
* Return: void
*/
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
uint8_t ce_service_max_yield_time);
/**
* hif_get_ce_service_max_yield_time() - get CE service max yield time
* @hif: hif context
*
* This API returns CE service max yield time.
*
* Return: CE service max yield time
*/
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
#endif /* _HIF_H_ */

View File

@@ -141,6 +141,8 @@ struct CE_state {
/* time in nanoseconds to yield control of napi poll */
unsigned long long ce_service_yield_time;
/* CE service start time in nanoseconds */
unsigned long long ce_service_start_time;
/* Num Of Receive Buffers handled for one interrupt DPC routine */
unsigned int receive_count;
/* epping */

View File

@@ -1992,6 +1992,10 @@ more_data:
more_comp_cnt = 0;
goto more_data;
}
hif_update_napi_max_poll_time(ce_state, scn->napi_data.napis[ce_id],
qdf_get_cpu());
qdf_atomic_set(&ce_state->rx_pending, 0);
if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
@@ -2025,11 +2029,6 @@ static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
}
#endif /* WLAN_FEATURE_FASTPATH */
/* Maximum amount of time in nano seconds before which the CE per engine service
* should yield. ~1 jiffie.
*/
#define CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS (10 * 1000 * 1000)
/*
* Guts of interrupt handler for per-engine interrupts on a particular CE.
*
@@ -2066,10 +2065,11 @@ int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
/* Clear force_break flag and re-initialize receive_count to 0 */
CE_state->receive_count = 0;
CE_state->force_break = 0;
CE_state->ce_service_start_time = sched_clock();
CE_state->ce_service_yield_time =
sched_clock() +
(unsigned long long)CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS;
CE_state->ce_service_start_time +
hif_get_ce_service_max_yield_time(
(struct hif_opaque_softc *)scn);
qdf_spin_lock(&CE_state->ce_index_lock);
/*

View File

@@ -1260,3 +1260,19 @@ void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
scn->initial_wakeup_priv = priv;
}
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time (ms, from ini) to set
 *
 * Converts the ini-supplied millisecond value to nanoseconds and caches
 * it in the hif context for later use by the CE per-engine service.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint8_t ce_service_max_yield_time)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);

	/* ms -> ns; uint8_t input cannot overflow this product */
	scn->ce_service_max_yield_time =
		(unsigned long long)ce_service_max_yield_time * 1000 * 1000;
}
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
return hif_ctx->ce_service_max_yield_time;
}

View File

@@ -176,6 +176,8 @@ struct hif_softc {
#ifdef FEATURE_NAPI
struct qca_napi_data napi_data;
#endif /* FEATURE_NAPI */
/* stores ce_service_max_yield_time in ns */
unsigned long long ce_service_max_yield_time;
struct hif_driver_state_callbacks callbacks;
uint32_t hif_con_param;
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT

View File

@@ -853,6 +853,18 @@ out:
}
qdf_export_symbol(hif_napi_poll);
/**
 * hif_update_napi_max_poll_time() - updates NAPI max poll time
 * @ce_state: ce state
 * @napi_info: pointer to napi info structure
 * @cpu_id: cpu id
 *
 * Records the elapsed time of the current CE service (measured from
 * ce_service_start_time) into the per-CPU NAPI stats if it exceeds the
 * maximum observed so far.
 *
 * Return: void
 */
void hif_update_napi_max_poll_time(struct CE_state *ce_state,
				   struct qca_napi_info *napi_info,
				   int cpu_id)
{
	unsigned long long *max_time =
		&napi_info->stats[cpu_id].napi_max_poll_time;
	unsigned long long poll_time;

	poll_time = sched_clock() - ce_state->ce_service_start_time;
	if (poll_time > *max_time)
		*max_time = poll_time;
}
#ifdef HIF_IRQ_AFFINITY
/**
*
@@ -890,16 +902,22 @@ void hif_napi_update_yield_stats(struct CE_state *ce_state,
return;
}
if (unlikely(NULL == napi_data->napis[ce_id]))
return;
ce_id = ce_state->id;
cpu_id = qdf_get_cpu();
if (unlikely(!napi_data->napis[ce_id])) {
HIF_INFO("%s: NAPI info is NULL for ce id: %d",
__func__, ce_id);
return;
}
if (time_limit_reached)
napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
else
napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
hif_update_napi_max_poll_time(ce_state, napi_data->napis[ce_id],
cpu_id);
}
/**

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -255,4 +255,17 @@ static inline int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
{ return 0; }
#endif /* HIF_IRQ_AFFINITY */
/**
* hif_update_napi_max_poll_time() - updates NAPI max poll time
* @ce_state: ce state
* @napi_info: pointer to napi info structure
* @cpu_id: cpu id
*
 * This API updates the NAPI max poll time per CE per CPU.
*
* Return: void
*/
void hif_update_napi_max_poll_time(struct CE_state *ce_state,
struct qca_napi_info *napi_info,
int cpu_id);
#endif /* __HIF_NAPI_H__ */