android_kernel_xiaomi_sm8450/include/linux/sched/sysctl.h
Pavankumar Kondeti 9db84311e7 tracing: rework sched_preempt_disable trace point implementation
The current implementation of the sched_preempt_disable trace point
fails to detect the preemption disable time inside spin_lock_bh()
and spin_unlock_bh(). This is because __local_bh_disable_ip() directly
calls __preempt_count_add(), which skips the preemption disable
tracking. Instead of relying on updates to the preempt count, it
is better to wire the preemption disable tracking directly into the
preemptoff tracer. This is similar to how irq disable tracking
is done.
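
For illustration, here is a simplified sketch of the bh-disable path
(loosely based on kernel/softirq.c, not the exact 5.4 source) showing
why the raw counter update is invisible to preempt-count-based tracking:

    void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
    {
            /*
             * Raw, untraced update: unlike preempt_count_add(), the
             * __preempt_count_add() helper never calls into the tracer,
             * so a preempt-count-based trace point cannot see this window.
             */
            __preempt_count_add(cnt);
            barrier();
    }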

The current code handles the false positives coming from __schedule()
by directly resetting the time stamp. This requires an interface
from the scheduler into the preemptoff tracer. To avoid this additional
interface, this patch detects the same condition by comparing
the task pid and context switch count. If they do not match between
preemption disable and preemption enable, the preemption disable time
is not tracked, since a context switch happened in between.
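
A minimal sketch of that check (kernel-side pseudocode; the per-CPU
variables, helper names, and trace call are illustrative assumptions,
not the actual patch):

    static DEFINE_PER_CPU(u64, pd_start_time);
    static DEFINE_PER_CPU(pid_t, pd_pid);
    static DEFINE_PER_CPU(unsigned long, pd_nr_switches);

    static void note_preempt_disable(void)
    {
            this_cpu_write(pd_start_time, sched_clock());
            this_cpu_write(pd_pid, current->pid);
            this_cpu_write(pd_nr_switches, current->nvcsw + current->nivcsw);
    }

    static void note_preempt_enable(void)
    {
            u64 delta;

            /*
             * A pid or context-switch-count mismatch means __schedule()
             * ran inside this window, so the measurement spans two tasks
             * and must be discarded rather than reported.
             */
            if (this_cpu_read(pd_pid) != current->pid ||
                this_cpu_read(pd_nr_switches) !=
                            current->nvcsw + current->nivcsw)
                    return;

            delta = sched_clock() - this_cpu_read(pd_start_time);
            if (delta > sysctl_preemptoff_tracing_threshold_ns)
                    trace_sched_preempt_disable(delta);
    }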

Due to this rework, the sched_preempt_disable trace point location is
changed to

/sys/kernel/debug/tracing/events/preemptirq/sched_preempt_disable/enable
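
To enable the relocated event from userspace (equivalent to echoing 1
into the file above), a hypothetical helper could look like this:

    #include <fcntl.h>
    #include <unistd.h>

    /* Illustrative only: enable the trace point via tracefs. */
    int enable_sched_preempt_disable(void)
    {
            int fd = open("/sys/kernel/debug/tracing/events/preemptirq/"
                          "sched_preempt_disable/enable", O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, "1", 1) != 1) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }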

Change-Id: I7f58d316b7c54bc7a54102bfeb678404bda010d4
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[satyap@codeaurora.org: port to 5.4 and resolve trivial merge conflicts]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
2020-01-27 15:30:25 -08:00


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SYSCTL_H
#define _LINUX_SCHED_SYSCTL_H
#include <linux/types.h>
struct ctl_table;
#ifdef CONFIG_DETECT_HUNG_TASK
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_check_interval_secs;
extern int sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
                                         void __user *buffer,
                                         size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
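/*
 * Illustrative note (not part of the original header): thanks to the enum
 * fallback above, callers may test the timeout unconditionally; when
 * CONFIG_DETECT_HUNG_TASK=n the value is the constant 0 and the compiler
 * eliminates the branch:
 *
 *      if (sysctl_hung_task_timeout_secs)
 *              timeout = min(timeout, sysctl_hung_task_timeout_secs);
 */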
#define MAX_CLUSTERS 3
/* MAX_MARGIN_LEVELS should be one less than MAX_CLUSTERS */
#define MAX_MARGIN_LEVELS (MAX_CLUSTERS - 1)
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
#ifdef CONFIG_SCHED_WALT
extern unsigned int __weak sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int __weak sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
extern unsigned int __weak sysctl_sched_user_hint;
extern const int __weak sched_user_hint_max;
extern unsigned int __weak sysctl_sched_cpu_high_irqload;
extern unsigned int __weak sysctl_sched_boost;
extern unsigned int __weak sysctl_sched_group_upmigrate_pct;
extern unsigned int __weak sysctl_sched_group_downmigrate_pct;
extern unsigned int __weak sysctl_sched_conservative_pl;
extern unsigned int __weak sysctl_sched_walt_rotate_big_tasks;
extern unsigned int __weak sysctl_sched_min_task_util_for_boost;
extern unsigned int __weak sysctl_sched_min_task_util_for_colocation;
extern unsigned int __weak sysctl_sched_asym_cap_sibling_freq_match_pct;
extern unsigned int __weak sysctl_sched_coloc_downmigrate_ns;
extern unsigned int __weak sysctl_sched_task_unfilter_period;
extern unsigned int __weak sysctl_sched_busy_hyst_enable_cpus;
extern unsigned int __weak sysctl_sched_busy_hyst;
extern unsigned int __weak sysctl_sched_coloc_busy_hyst_enable_cpus;
extern unsigned int __weak sysctl_sched_coloc_busy_hyst;
extern unsigned int __weak sysctl_sched_coloc_busy_hyst_max_ms;
extern unsigned int __weak sysctl_sched_window_stats_policy;
extern unsigned int __weak sysctl_sched_ravg_window_nr_ticks;
extern unsigned int __weak sysctl_sched_many_wakeup_threshold;
extern unsigned int __weak sysctl_sched_dynamic_ravg_window_enable;
extern int __weak
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
                                   void __user *buffer, size_t *lenp,
                                   loff_t *ppos);
extern int __weak
walt_proc_user_hint_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp,
                            loff_t *ppos);
extern int __weak
sched_updown_migrate_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos);
extern int __weak
sched_ravg_window_handler(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp,
                          loff_t *ppos);
#endif
#if defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PREEMPT_TRACER)
extern unsigned int sysctl_preemptoff_tracing_threshold_ns;
#endif
#if defined(CONFIG_PREEMPTIRQ_EVENTS) && defined(CONFIG_IRQSOFF_TRACER)
extern unsigned int sysctl_irqsoff_tracing_threshold_ns;
#endif
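/*
 * Note (an inference from the names, not original header text): these
 * thresholds presumably set the minimum disable duration, in nanoseconds,
 * below which the preemptoff/irqsoff trace points stay silent.
 */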
enum sched_tunable_scaling {
        SCHED_TUNABLESCALING_NONE,
        SCHED_TUNABLESCALING_LOG,
        SCHED_TUNABLESCALING_LINEAR,
        SCHED_TUNABLESCALING_END,
};
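/*
 * Illustrative note (not in the original header): in mainline
 * kernel/sched/fair.c these modes scale sysctl_sched_latency and friends
 * by a factor of 1 (none), 1 + ilog2(ncpus) (log), or ncpus (linear).
 */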
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
int sched_proc_update_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *length,
                              loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_WALT
extern int __weak sched_boost_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
/*
 * control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
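/*
 * Example (mainline defaults, for illustration): period = 1000000 us with
 * runtime = 950000 us caps realtime tasks at 95% of CPU time; writing -1
 * to sched_rt_runtime_us disables throttling entirely.
 */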
#ifdef CONFIG_UCLAMP_TASK
extern unsigned int sysctl_sched_uclamp_util_min;
extern unsigned int sysctl_sched_uclamp_util_max;
#endif
#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif
extern int sysctl_sched_rr_timeslice;
extern int sched_rr_timeslice;
extern int sched_rr_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp,
                            loff_t *ppos);
extern int sched_rt_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp,
                            loff_t *ppos);
#ifdef CONFIG_UCLAMP_TASK
extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
                                       void __user *buffer, size_t *lenp,
                                       loff_t *ppos);
#endif
extern int sysctl_numa_balancing(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos);
extern int sysctl_schedstats(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos);
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern unsigned int sysctl_sched_energy_aware;
extern int sched_energy_aware_handler(struct ctl_table *table, int write,
                                      void __user *buffer, size_t *lenp,
                                      loff_t *ppos);
#endif
#define LIB_PATH_LENGTH 512
extern char sched_lib_name[LIB_PATH_LENGTH];
extern unsigned int sched_lib_mask_force;
extern bool is_sched_lib_based_app(pid_t pid);
#endif /* _LINUX_SCHED_SYSCTL_H */