sched: Add snapshot of Window Assisted Load Tracking (WALT)

This snapshot is taken from msm-4.19 as of commit 5debecbe7195
("trace: filter out spurious preemption and IRQs disable traces").

Change-Id: I8fab4084971baadcaa037f40ab549fc073a4b1ea
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>

@@ -117,6 +117,18 @@ struct task_group;
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
/*
 * Enum for display driver to provide varying refresh rates
 */
enum fps {
	FPS0 = 0,
	FPS30 = 30,
	FPS48 = 48,
	FPS60 = 60,
	FPS90 = 90,
	FPS120 = 120,
};
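
The refresh-rate hint is consumed through sched_set_refresh_rate(), declared later in this patch under CONFIG_SCHED_WALT. A minimal sketch of a caller, where panel_report_60hz() is a hypothetical display-driver hook and not part of this patch:

#include <linux/sched.h>

/* Hypothetical display-driver hook, for illustration only. */
static void panel_report_60hz(void)
{
	/* Report the current panel refresh rate to the scheduler. */
	sched_set_refresh_rate(FPS60);
}
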
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/*
@@ -212,6 +224,21 @@ struct task_group;
/* Task command name length: */
#define TASK_COMM_LEN 16
enum task_event {
	PUT_PREV_TASK = 0,
	PICK_NEXT_TASK = 1,
	TASK_WAKE = 2,
	TASK_MIGRATE = 3,
	TASK_UPDATE = 4,
	IRQ_UPDATE = 5,
};
/* Note: this needs to be in sync with migrate_type_names array */
enum migrate_types {
	GROUP_TO_RQ,
	RQ_TO_GROUP,
};
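
The comment above ties enum migrate_types to a migrate_type_names array kept in the WALT sources; a sketch of the ordering contract it implies (illustrative, not the in-tree definition):

/* Element order must match enum migrate_types. */
static const char * const migrate_type_names[] = {
	"GROUP_TO_RQ",
	"RQ_TO_GROUP",
};
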
extern void scheduler_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
@@ -478,6 +505,89 @@ struct sched_entity {
#endif
};
struct cpu_cycle_counter_cb {
	u64 (*get_cpu_cycle_counter)(int cpu);
};
DECLARE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
#ifdef CONFIG_SCHED_WALT
extern void __weak sched_exit(struct task_struct *p);
extern int __weak
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void __weak
sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax);
extern void __weak free_task_load_ptrs(struct task_struct *p);
extern void __weak sched_set_refresh_rate(enum fps fps);
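
Together, struct cpu_cycle_counter_cb and register_cpu_cycle_counter_cb() let a platform driver hand WALT a per-CPU cycle counter. A sketch of a registration path, where read_cpu_cycles() and cycle_counter_init() are hypothetical stand-ins for real driver code:

#include <linux/init.h>
#include <linux/sched.h>

/* Hypothetical counter read; a real driver would query hardware. */
static u64 read_cpu_cycles(int cpu)
{
	return 0;
}

static struct cpu_cycle_counter_cb cycle_counter_cb = {
	.get_cpu_cycle_counter = read_cpu_cycles,
};

static int __init cycle_counter_init(void)
{
	/* Propagate the registration result (0 on success). */
	return register_cpu_cycle_counter_cb(&cycle_counter_cb);
}
late_initcall(cycle_counter_init);
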
#define RAVG_HIST_SIZE_MAX 5
#define NUM_BUSY_BUCKETS 10
/* ravg represents frequency scaled cpu-demand of tasks */
struct ravg {
	/*
	 * 'mark_start' marks the beginning of an event (task waking up, task
	 * starting to execute, task being preempted) within a window
	 *
	 * 'sum' represents how runnable a task has been within current
	 * window. It incorporates both running time and wait time and is
	 * frequency scaled.
	 *
	 * 'sum_history' keeps track of history of 'sum' seen over previous
	 * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
	 * ignored.
	 *
	 * 'demand' represents maximum sum seen over previous
	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
	 * demand for tasks.
	 *
	 * 'curr_window_cpu' represents task's contribution to cpu busy time on
	 * various CPUs in the current window
	 *
	 * 'prev_window_cpu' represents task's contribution to cpu busy time on
	 * various CPUs in the previous window
	 *
	 * 'curr_window' represents the sum of all entries in curr_window_cpu
	 *
	 * 'prev_window' represents the sum of all entries in prev_window_cpu
	 *
	 * 'pred_demand' represents task's current predicted cpu busy time
	 *
	 * 'busy_buckets' groups historical busy time into different buckets
	 * used for prediction
	 *
	 * 'demand_scaled' represents task's demand scaled to 1024
	 */
	u64 mark_start;
	u32 sum, demand;
	u32 coloc_demand;
	u32 sum_history[RAVG_HIST_SIZE_MAX];
	u32 *curr_window_cpu, *prev_window_cpu;
	u32 curr_window, prev_window;
	u32 pred_demand;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
	u16 demand_scaled;
	u16 pred_demand_scaled;
	u64 active_time;
	u64 last_win_size;
};
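
Per the comment block, 'demand' is derived from the recorded per-window sums; with a max-of-history policy that reduces to a scan of sum_history. A toy illustration (WALT also supports recent/average style policies; the real helper lives in the WALT sources, not this header):

#include <linux/types.h>

/* Toy version of the max-of-history policy described above. */
static u32 demand_from_history(const u32 *hist, unsigned int n)
{
	u32 max = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		if (hist[i] > max)
			max = hist[i];
	return max;
}
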
#else
static inline void sched_exit(struct task_struct *p) { }

static inline int
register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
{
	return 0;
}

static inline void free_task_load_ptrs(struct task_struct *p) { }

static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
						 u32 fmin, u32 fmax) { }

static inline void sched_set_refresh_rate(enum fps fps) { }
#endif /* CONFIG_SCHED_WALT */
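
For 'demand_scaled' above, "scaled to 1024" amounts to normalising the nanosecond demand against the WALT window length. A rough sketch, assuming a window_ns parameter in place of the in-kernel sched_ravg_window:

#include <linux/math64.h>
#include <linux/types.h>

/* Illustrative only: demand (ns) mapped into the 0..1024 capacity range. */
static inline u16 demand_to_scaled(u64 demand, u64 window_ns)
{
	return (u16)div64_u64(demand << 10, window_ns);	/* 1024 == 1 << 10 */
}
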
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
@@ -675,6 +785,20 @@ struct task_struct {
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_SCHED_WALT
	u64 last_sleep_ts;
	bool wake_up_idle;
	struct ravg ravg;
	u32 init_load_pct;
	u64 last_wake_ts;
	u64 last_enqueued_ts;
	struct related_thread_group *grp;
	struct list_head grp_list;
	u64 cpu_cycles;
	bool misfit;
	u8 unfilter;
#endif
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
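
Of the task_struct fields added above, init_load_pct seeds a new task's demand as a percentage of one WALT window. A hedged sketch of that conversion, with window_ns standing in for the in-kernel window size and initial_task_demand() being an illustrative name:

#include <linux/math64.h>
#include <linux/types.h>

/* Illustrative: initial demand as init_load_pct percent of one window. */
static inline u64 initial_task_demand(u32 init_load_pct, u64 window_ns)
{
	return div64_u64(window_ns * init_load_pct, 100);
}
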
@@ -2000,4 +2124,37 @@ int sched_trace_rq_cpu(struct rq *rq);
const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
#ifdef CONFIG_SCHED_WALT
#define PF_WAKE_UP_IDLE 1
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
{
	return p->wake_up_idle;
}

static inline int sched_set_wake_up_idle(struct task_struct *p,
					 int wake_up_idle)
{
	p->wake_up_idle = !!wake_up_idle;
	return 0;
}

static inline void set_wake_up_idle(bool enabled)
{
	current->wake_up_idle = enabled;
}
#else
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
{
	return 0;
}

static inline int sched_set_wake_up_idle(struct task_struct *p,
					 int wake_up_idle)
{
	return 0;
}

static inline void set_wake_up_idle(bool enabled) {}
#endif
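
The accessors above expose a per-task "wake up to an idle CPU" hint to WALT clients. A minimal usage sketch; mark_task_latency_sensitive() is an illustrative caller, not part of this patch:

#include <linux/sched.h>

/* Ask the scheduler to prefer an idle CPU when waking this task. */
static void mark_task_latency_sensitive(struct task_struct *p)
{
	sched_set_wake_up_idle(p, 1);
}
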
#endif