qcacmn: Add detection for tasklet delay

This feature is used to detect CE tasklet scheduling delays
and credit response delays.

Change-Id: I9a8fcb425edd5cf96fae5f6cd3bfc7f51172c814
CRs-Fixed: 2874874
This commit is contained in:
Jingxiang Ge
2021-02-05 12:45:50 +08:00
committed by snandini
Parent f4729e7ebf
Commit 2c6d25c0ad
9 changed files with 404 additions and 6 deletions

View file

@@ -1731,4 +1731,51 @@ int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
* Return: 0 on success. Error code on failure.
*/
int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
enum hif_credit_exchange_type {
HIF_REQUEST_CREDIT,
HIF_PROCESS_CREDIT_REPORT,
};
enum hif_detect_latency_type {
HIF_DETECT_TASKLET,
HIF_DETECT_CREDIT,
HIF_DETECT_UNKNOWN
};
#ifdef HIF_DETECTION_LATENCY_ENABLE
void hif_latency_detect_credit_record_time(
enum hif_credit_exchange_type type,
struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
void hif_check_detection_latency(struct hif_softc *scn,
bool from_timer,
uint32_t bitmap_type);
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
#else
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_latency_detect_credit_record_time(
enum hif_credit_exchange_type type,
struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
bool from_timer,
uint32_t bitmap_type)
{}
static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}
#endif
#endif /* _HIF_H_ */
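
The header follows the usual compile-time stub pattern: each API gets an empty static inline under the opposite branch of HIF_DETECTION_LATENCY_ENABLE, so call sites build cleanly when the feature is off. A minimal standalone sketch of that pattern (all names hypothetical, not from the driver):

#include <stdio.h>

#ifdef FEATURE_X_ENABLE
static void feature_record_event(int id)
{
	printf("recording event %d\n", id);
}
#else
/* empty inline stub: the call below compiles away when the feature is off */
static inline void feature_record_event(int id) {}
#endif

int main(void)
{
	feature_record_event(42);
	return 0;
}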

View file

@@ -336,6 +336,44 @@ hif_ce_latency_stats(struct hif_softc *hif_ctx)
}
#endif /*CE_TASKLET_DEBUG_ENABLE*/
#ifdef HIF_DETECTION_LATENCY_ENABLE
static inline
void hif_latency_detect_tasklet_sched(
struct hif_softc *scn,
struct ce_tasklet_entry *tasklet_entry)
{
if (tasklet_entry->ce_id != CE_ID_2)
return;
scn->latency_detect.ce2_tasklet_sched_cpuid = qdf_get_cpu();
scn->latency_detect.ce2_tasklet_sched_time = qdf_system_ticks();
}
static inline
void hif_latency_detect_tasklet_exec(
struct hif_softc *scn,
struct ce_tasklet_entry *tasklet_entry)
{
if (tasklet_entry->ce_id != CE_ID_2)
return;
scn->latency_detect.ce2_tasklet_exec_time = qdf_system_ticks();
hif_check_detection_latency(scn, false, BIT(HIF_DETECT_TASKLET));
}
#else
static inline
void hif_latency_detect_tasklet_sched(
struct hif_softc *scn,
struct ce_tasklet_entry *tasklet_entry)
{}
static inline
void hif_latency_detect_tasklet_exec(
struct hif_softc *scn,
struct ce_tasklet_entry *tasklet_entry)
{}
#endif
/**
* ce_tasklet() - ce_tasklet
* @data: data
@@ -356,6 +394,8 @@ static void ce_tasklet(unsigned long data)
hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);
hif_latency_detect_tasklet_exec(scn, tasklet_entry);
if (qdf_atomic_read(&scn->link_suspended)) {
hif_err("ce %d tasklet fired after link suspend",
tasklet_entry->ce_id);
@@ -382,6 +422,7 @@ static void ce_tasklet(unsigned long data)
}
ce_schedule_tasklet(tasklet_entry);
hif_latency_detect_tasklet_sched(scn, tasklet_entry);
return;
}
@@ -604,8 +645,12 @@ static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
qdf_atomic_dec(&scn->active_tasklet_cnt);
return false;
}
/* Keep this before tasklet_schedule(); this is to keep whunt happy.
* In whunt, the tasklet may run before hif_tasklet_schedule() has finished.
*/
hif_latency_detect_tasklet_sched(scn, tasklet_entry);
tasklet_schedule(&tasklet_entry->intr_tq);
if (scn->ce_latency_stats)
hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);
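
The two hooks above bracket the tasklet's life for CE2 only: the sched hook records the CPU and timestamp just before the tasklet is queued, and the exec hook records a timestamp at handler entry and runs the check. A user-space sketch of that bookkeeping, with a plain counter standing in for qdf_system_ticks()/jiffies (illustrative, not driver code):

#include <stdio.h>

#define TRACKED_CE 2

/* fake tick counter standing in for qdf_system_ticks()/jiffies */
static unsigned long fake_ticks;

struct ce_latency {
	int sched_cpu;
	unsigned long sched_time;
	unsigned long exec_time;
};

static void record_sched(struct ce_latency *l, int ce_id, int cpu)
{
	if (ce_id != TRACKED_CE)
		return;
	l->sched_cpu = cpu;
	l->sched_time = ++fake_ticks;
}

static void record_exec(struct ce_latency *l, int ce_id)
{
	if (ce_id != TRACKED_CE)
		return;
	l->exec_time = ++fake_ticks;
	/* the real hook runs the latency check here */
}

int main(void)
{
	struct ce_latency l = { 0 };

	record_sched(&l, TRACKED_CE, 0);	/* just before queueing the tasklet */
	record_exec(&l, TRACKED_CE);		/* at handler entry */
	printf("ce%d: sched=%lu on cpu%d, exec=%lu\n",
	       TRACKED_CE, l.sched_time, l.sched_cpu, l.exec_time);
	return 0;
}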

View file

@@ -50,6 +50,7 @@
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif
#include <linux/cpumask.h>
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
@@ -645,6 +646,231 @@ static void hif_cpuhp_unregister(struct hif_softc *scn)
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
* hif_check_detection_latency() - check for tasklet/credit latency
*
* @scn: hif context
* @from_timer: true if called from the timer handler
* @bitmap_type: bitmap selecting whether to check the tasklet path, the credit path, or both
*
* Return: none
*/
void hif_check_detection_latency(struct hif_softc *scn,
bool from_timer,
uint32_t bitmap_type)
{
qdf_time_t ce2_tasklet_sched_time =
scn->latency_detect.ce2_tasklet_sched_time;
qdf_time_t ce2_tasklet_exec_time =
scn->latency_detect.ce2_tasklet_exec_time;
qdf_time_t credit_request_time =
scn->latency_detect.credit_request_time;
qdf_time_t credit_report_time =
scn->latency_detect.credit_report_time;
qdf_time_t curr_jiffies = qdf_system_ticks();
uint32_t detect_latency_threshold =
scn->latency_detect.detect_latency_threshold;
int cpu_id = qdf_get_cpu();
if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
return;
if (!scn->latency_detect.enable_detection)
return;
/* Two kinds of check here.
* from_timer==true: check whether the tasklet or the credit report has stalled
* from_timer==false: check whether the tasklet execution or the credit report arrived late
*/
if (bitmap_type & BIT(HIF_DETECT_TASKLET) &&
(from_timer ?
qdf_system_time_after(ce2_tasklet_sched_time,
ce2_tasklet_exec_time) :
qdf_system_time_after(ce2_tasklet_exec_time,
ce2_tasklet_sched_time)) &&
qdf_system_time_after(
curr_jiffies,
ce2_tasklet_sched_time +
qdf_system_msecs_to_ticks(detect_latency_threshold))) {
hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu,ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
from_timer, curr_jiffies, ce2_tasklet_sched_time,
ce2_tasklet_exec_time, detect_latency_threshold,
scn->latency_detect.detect_latency_timer_timeout,
cpu_id, (void *)_RET_IP_);
goto latency;
}
if (bitmap_type & BIT(HIF_DETECT_CREDIT) &&
(from_timer ?
qdf_system_time_after(credit_request_time,
credit_report_time) :
qdf_system_time_after(credit_report_time,
credit_request_time)) &&
qdf_system_time_after(
curr_jiffies,
credit_request_time +
qdf_system_msecs_to_ticks(detect_latency_threshold))) {
hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu,credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
from_timer, curr_jiffies, credit_request_time,
credit_report_time, detect_latency_threshold,
scn->latency_detect.detect_latency_timer_timeout,
cpu_id, (void *)_RET_IP_);
goto latency;
}
return;
latency:
qdf_check_state_before_panic(__func__, __LINE__);
}
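
The predicate reads more easily once the two call paths are separated: from the timer it asks "was a sched (or credit request) recorded without a matching exec (or report)?", and from the execution path it asks "did the exec (or report) arrive, but too late?"; in both cases the threshold is measured from the sched/request timestamp. A small user-space model of that logic, with time_after() reimplemented the way the kernel does it (illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long ticks_t;

static bool ticks_after(ticks_t a, ticks_t b)
{
	return (long)(b - a) < 0;	/* true if a is later than b */
}

/* from_timer=true : a sched/request was recorded with no matching exec/report.
 * from_timer=false: the exec/report arrived; check whether it was too late.
 * threshold       : detect_latency_threshold converted to ticks.
 */
static bool detect_stall(bool from_timer, ticks_t sched, ticks_t exec,
			 ticks_t now, ticks_t threshold)
{
	bool pending = from_timer ? ticks_after(sched, exec)
				  : ticks_after(exec, sched);

	return pending && ticks_after(now, sched + threshold);
}

int main(void)
{
	/* timer path: scheduled at 1000, never executed, well past threshold */
	printf("%d\n", detect_stall(true, 1000, 900, 1500, 100));	/* 1 */
	/* exec path: executed 50 ticks after scheduling, under the threshold */
	printf("%d\n", detect_stall(false, 1000, 1050, 1050, 100));	/* 0 */
	return 0;
}
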
static void hif_latency_detect_timeout_handler(void *arg)
{
struct hif_softc *scn = (struct hif_softc *)arg;
int next_cpu;
hif_check_detection_latency(scn, true,
BIT(HIF_DETECT_TASKLET) |
BIT(HIF_DETECT_CREDIT));
/* The timer needs to be started on a different CPU so that it can
* detect a tasklet scheduling stall. There is still a chance that,
* after the timer has been started, the irq/tasklet runs on the same
* CPU; the tasklet then executes before the softirq timer, and if
* that tasklet stalls the timer cannot detect it. We accept this as
* a limitation: if the tasklet stalls, other paths will still detect
* it, just a little later.
*/
next_cpu = cpumask_any_but(
cpu_active_mask,
scn->latency_detect.ce2_tasklet_sched_cpuid);
if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
hif_debug("start timer on local");
/* no other available CPU was found, start on the local CPU */
qdf_timer_mod(
&scn->latency_detect.detect_latency_timer,
scn->latency_detect.detect_latency_timer_timeout);
} else {
qdf_timer_start_on(
&scn->latency_detect.detect_latency_timer,
scn->latency_detect.detect_latency_timer_timeout,
next_cpu);
}
}
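
After each check the handler re-arms itself on a CPU other than the one that last scheduled the CE2 tasklet, so a stalled softirq on that CPU cannot also stall the watchdog; if no other CPU is active it falls back to the local one. A user-space sketch of that selection, with cpu_active_mask modeled as a plain bitmap (illustrative, not kernel code):

#include <stdio.h>

#define NR_CPUS 8

/* return the first set CPU other than 'avoid', or -1 if there is none */
static int any_cpu_but(unsigned int active_mask, int avoid)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if ((active_mask & (1u << cpu)) && cpu != avoid)
			return cpu;
	return -1;
}

int main(void)
{
	unsigned int active = 0x0f;	/* CPUs 0-3 online */
	int sched_cpu = 0;		/* CPU that last scheduled the CE2 tasklet */
	int next = any_cpu_but(active, sched_cpu);

	if (next < 0)
		printf("no other CPU active, re-arm the timer locally\n");
	else
		printf("re-arm the detection timer on CPU %d\n", next);
	return 0;
}
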
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
if (!scn) {
hif_info_high("scn is null");
return;
}
if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
return;
scn->latency_detect.detect_latency_timer_timeout =
DETECTION_TIMER_TIMEOUT;
scn->latency_detect.detect_latency_threshold =
DETECTION_LATENCY_THRESHOLD;
hif_info("timer timeout %u, latency threshold %u",
scn->latency_detect.detect_latency_timer_timeout,
scn->latency_detect.detect_latency_threshold);
scn->latency_detect.is_timer_started = false;
qdf_timer_init(NULL,
&scn->latency_detect.detect_latency_timer,
&hif_latency_detect_timeout_handler,
scn,
QDF_TIMER_TYPE_SW_SPIN);
}
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
return;
hif_info("deinit timer");
qdf_timer_free(&scn->latency_detect.detect_latency_timer);
}
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
return;
hif_info_rl("start timer");
if (scn->latency_detect.is_timer_started) {
hif_info("timer has been started");
return;
}
qdf_timer_start(&scn->latency_detect.detect_latency_timer,
scn->latency_detect.detect_latency_timer_timeout);
scn->latency_detect.is_timer_started = true;
}
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
return;
hif_info_rl("stop timer");
qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
scn->latency_detect.is_timer_started = false;
}
void hif_latency_detect_credit_record_time(
enum hif_credit_exchange_type type,
struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
if (!scn) {
hif_err("Could not do runtime put, scn is null");
return;
}
if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
return;
if (HIF_REQUEST_CREDIT == type)
scn->latency_detect.credit_request_time = qdf_system_ticks();
else if (HIF_PROCESS_CREDIT_REPORT == type)
scn->latency_detect.credit_report_time = qdf_system_ticks();
hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
}
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
if (!scn) {
hif_err("Could not do runtime put, scn is null");
return;
}
if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
return;
scn->latency_detect.enable_detection = value;
}
#else
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
#endif
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
uint32_t mode,
enum qdf_bus_type bus_type,
@@ -667,6 +893,7 @@ struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
scn->qdf_dev = qdf_ctx;
scn->hif_con_param = mode;
qdf_atomic_init(&scn->active_tasklet_cnt);
qdf_atomic_init(&scn->active_grp_tasklet_cnt);
qdf_atomic_init(&scn->link_suspended);
qdf_atomic_init(&scn->tasklet_from_intr);
@@ -684,8 +911,13 @@ struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
status, bus_type);
qdf_mem_free(scn);
scn = NULL;
goto out;
}
hif_cpuhp_register(scn);
hif_latency_detect_timer_init(scn);
out:
return GET_HIF_OPAQUE_HDL(scn);
}
@@ -722,6 +954,8 @@ void hif_close(struct hif_opaque_softc *hif_ctx)
return;
}
hif_latency_detect_timer_deinit(scn);
if (scn->athdiag_procfs_inited) {
athdiag_procfs_remove();
scn->athdiag_procfs_inited = false;
@@ -919,6 +1153,7 @@ QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
hif_ut_suspend_init(scn);
hif_register_recovery_notifier(scn);
hif_latency_detect_timer_start(hif_ctx);
/*
* Flag to avoid potential unallocated memory access from MSI
@@ -948,6 +1183,9 @@ void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
if (!scn)
return;
hif_set_enable_detection(hif_ctx, false);
hif_latency_detect_timer_stop(hif_ctx);
hif_unregister_recovery_notifier(scn);
hif_nointrs(scn);
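
Taken together, the hif_main.c hooks pair up across the HIF lifecycle: the timer is initialized in hif_open() and freed in hif_close(), started in hif_enable(), and in hif_disable() detection is switched off before the timer is stopped. A simplified user-space model of that ordering (illustrative; it collapses the enable flag and the timer into plain booleans):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct latency_detect_model {
	bool timer_initialized;	/* hif_open()  ..  hif_close()   */
	bool timer_started;	/* hif_enable() .. hif_disable() */
	bool enable_detection;	/* cleared before the timer stops */
};

static void model_open(struct latency_detect_model *m)
{
	m->timer_initialized = true;
}

static void model_enable(struct latency_detect_model *m)
{
	assert(m->timer_initialized);
	m->timer_started = true;
}

static void model_disable(struct latency_detect_model *m)
{
	m->enable_detection = false;	/* stop reporting first */
	m->timer_started = false;	/* then cancel the timer */
}

static void model_close(struct latency_detect_model *m)
{
	assert(!m->timer_started);
	m->timer_initialized = false;
}

int main(void)
{
	struct latency_detect_model m = { 0 };

	model_open(&m);
	model_enable(&m);
	model_disable(&m);
	model_close(&m);
	printf("lifecycle ordering ok\n");
	return 0;
}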

View file

@@ -147,6 +147,22 @@ struct hif_ce_stats {
int ce_ring_delta_fail_count;
};
#ifdef HIF_DETECTION_LATENCY_ENABLE
struct hif_latency_detect {
qdf_timer_t detect_latency_timer;
uint32_t detect_latency_timer_timeout;
bool is_timer_started;
bool enable_detection;
/* threshold when stall happens */
uint32_t detect_latency_threshold;
int ce2_tasklet_sched_cpuid;
qdf_time_t ce2_tasklet_sched_time;
qdf_time_t ce2_tasklet_exec_time;
qdf_time_t credit_request_time;
qdf_time_t credit_report_time;
};
#endif
/*
* Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
* for defined here
@@ -273,6 +289,9 @@ struct hif_softc {
/* Variable to track the link state change in RTPM */
qdf_atomic_t pm_link_state;
#endif
#ifdef HIF_DETECTION_LATENCY_ENABLE
struct hif_latency_detect latency_detect;
#endif
};
static inline

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2018,2020-2021, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -50,6 +50,10 @@ void htc_log_hang_credit_history(struct notifier_block *block, void *data)
{
}
#endif
void htc_latency_detection_credit_record_time(
enum htc_credit_exchange_type type,
struct hif_opaque_softc *hif_ctx);
#else /* FEATURE_HTC_CREDIT_HISTORY */
static inline

View file

@@ -1186,6 +1186,9 @@ static void get_htc_send_packets_credit_based(HTC_TARGET *target,
pEndpoint->TxCredits,
HTC_PACKET_QUEUE_DEPTH
(tx_queue));
hif_latency_detect_credit_record_time(
HIF_REQUEST_CREDIT,
target->hif_dev);
}
INC_HTC_EP_STAT(pEndpoint,
TxCreditLowIndications, 1);
@@ -2680,6 +2683,9 @@ void htc_process_credit_rpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt,
pEndpoint->TxCredits + rpt_credits,
HTC_PACKET_QUEUE_DEPTH(&pEndpoint->
TxQueue));
hif_latency_detect_credit_record_time(
HIF_PROCESS_CREDIT_REPORT,
target->hif_dev);
}
pEndpoint->TxCredits += rpt_credits;

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, 2018-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2014-2016, 2018-2021 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -65,6 +65,20 @@ static inline void qdf_timer_start(qdf_timer_t *timer, int msec)
__qdf_timer_start(timer, msec);
}
/**
* qdf_timer_start_on() - start a timer on an assigned CPU
* @timer: timer to start
* @msec: expiration period in milliseconds
* @cpu: CPU on which to queue the timer
*
* Return: none
*/
static inline void qdf_timer_start_on(qdf_timer_t *timer, int msec, int cpu)
{
__qdf_timer_start_on(timer, msec, cpu);
}
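
A hedged usage sketch of the new wrapper, mirroring how hif_main.c above initializes and arms its detection timer; it assumes the qdf_timer API declared in this header, and example_timeout_handler is a hypothetical callback:

#include <qdf_timer.h>
#include <qdf_types.h>

static qdf_timer_t example_timer;

static void example_timeout_handler(void *arg)
{
	/* runs in softirq context on the CPU the timer was queued on */
}

static void example_arm_on_cpu(int cpu, uint32_t timeout_ms)
{
	qdf_timer_init(NULL, &example_timer, example_timeout_handler,
		       NULL, QDF_TIMER_TYPE_SW_SPIN);
	qdf_timer_start_on(&example_timer, timeout_ms, cpu);
}
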
/**
* qdf_timer_mod() - modify the timeout on a timer
* @timer: timer to modify

View file

@@ -210,10 +210,13 @@ typedef struct qdf_shared_mem {
* on expiry
* @QDF_TIMER_TYPE_WAKE_APPS: Non deferrable timer which will cause CPU to
* wake up on expiry
* @QDF_TIMER_TYPE_SW_SPIN: Deferrable and pinned SW timer; it will not cause
* the CPU to wake up on expiry and can be queued on an assigned CPU via
* add_timer_on()
*/
typedef enum {
QDF_TIMER_TYPE_SW,
QDF_TIMER_TYPE_WAKE_APPS
QDF_TIMER_TYPE_WAKE_APPS,
QDF_TIMER_TYPE_SW_SPIN
} QDF_TIMER_TYPE;
/**

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -71,6 +71,8 @@ static inline QDF_STATUS __qdf_timer_init(struct __qdf_timer_t *timer,
if (type == QDF_TIMER_TYPE_SW)
flags |= TIMER_DEFERRABLE;
else if (type == QDF_TIMER_TYPE_SW_SPIN)
flags |= TIMER_DEFERRABLE | TIMER_PINNED;
if (object_is_on_stack(os_timer))
timer_setup_on_stack(os_timer, __os_timer_shim, flags);
@@ -79,7 +81,6 @@ static inline QDF_STATUS __qdf_timer_init(struct __qdf_timer_t *timer,
return QDF_STATUS_SUCCESS;
}
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
@@ -112,6 +113,17 @@ static inline QDF_STATUS __qdf_timer_init(struct __qdf_timer_t *timer,
addr);
else
setup_deferrable_timer(os_timer, __os_timer_shim, addr);
} else if (type == QDF_TIMER_TYPE_SW_SPIN) {
if (is_on_stack)
__setup_timer_on_stack(os_timer,
__os_timer_shim,
addr,
TIMER_DEFERRABLE | TIMER_PINNED);
else
__setup_timer(os_timer,
__os_timer_shim,
addr,
TIMER_DEFERRABLE | TIMER_PINNED);
} else {
if (is_on_stack)
setup_timer_on_stack(os_timer, __os_timer_shim, addr);
@@ -131,6 +143,16 @@ static inline void __qdf_timer_start(struct __qdf_timer_t *timer, uint32_t msec)
add_timer(os_timer);
}
static inline
void __qdf_timer_start_on(struct __qdf_timer_t *timer, uint32_t msec,
int cpu)
{
struct timer_list *os_timer = &timer->os_timer;
os_timer->expires = jiffies + __qdf_scaled_msecs_to_jiffies(msec);
add_timer_on(os_timer, cpu);
}
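
Underneath, the SW_SPIN plumbing amounts to a deferrable, pinned Linux timer queued on a chosen CPU. A plain kernel-style sketch of that combination using the >= 4.15 timer API (illustrative, not the qdf implementation):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_timer;

static void example_timer_cb(struct timer_list *t)
{
	/* timer expired on the CPU it was queued on */
}

static void example_start_on(int cpu, unsigned int msec)
{
	timer_setup(&example_timer, example_timer_cb,
		    TIMER_DEFERRABLE | TIMER_PINNED);
	example_timer.expires = jiffies + msecs_to_jiffies(msec);
	add_timer_on(&example_timer, cpu);
}
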
static inline bool __qdf_timer_mod(struct __qdf_timer_t *timer, uint32_t msec)
{
return mod_timer(&timer->os_timer,