sched: Add snapshot of affinity changes

This snapshot is taken from msm-4.19 as of 'commit 5debecbe7195
("trace: filter out spurious preemption and IRQs disable traces")'.

Change-Id: I5b7bcbbc1da5dffb89932dfda392029159d17859
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
Author: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
Date:   2019-09-17 11:33:01 -07:00
Parent: a9e87164b8
Commit: 93f140f6eb

6 files changed, 111 insertions(+), 1 deletion(-)

drivers/cpufreq/cpufreq.c

@@ -29,6 +29,7 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
+#include <linux/sched/sysctl.h>
 #include <trace/events/power.h>

 static LIST_HEAD(cpufreq_policy_list);
@@ -688,11 +689,35 @@ static ssize_t show_##file_name \
 }

 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
-show_one(cpuinfo_max_freq, cpuinfo.max_freq);
 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);

+unsigned int cpuinfo_max_freq_cached;
+
+static bool should_use_cached_freq(int cpu)
+{
+        if (!cpuinfo_max_freq_cached)
+                return false;
+
+        if (!(BIT(cpu) & sched_lib_mask_force))
+                return false;
+
+        return is_sched_lib_based_app(current->pid);
+}
+
+static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
+{
+        unsigned int freq = policy->cpuinfo.max_freq;
+
+        if (should_use_cached_freq(policy->cpu))
+                freq = cpuinfo_max_freq_cached << 1;
+        else
+                freq = policy->cpuinfo.max_freq;
+
+        return scnprintf(buf, PAGE_SIZE, "%u\n", freq);
+}
+
 __weak unsigned int arch_freq_get_on_cpu(int cpu)
 {
         return 0;
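Not part of the diff: a minimal userspace sketch of the read path the hunk above changes. When the policy CPU's bit is set in sched_lib_mask_force and the reading task has an executable mapping matching sched_lib_name, this read returns cpuinfo_max_freq_cached << 1 instead of the hardware maximum (standard cpufreq sysfs path; cpu0 is only an example).

/* Sketch only: read cpuinfo_max_freq the way an application would. */
#include <stdio.h>

int main(void)
{
        unsigned int khz = 0;
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%u", &khz) == 1)
                printf("cpuinfo_max_freq: %u kHz\n", khz);
        fclose(f);
        return 0;
}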

drivers/cpufreq/freq_table.c

@@ -54,6 +54,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 	policy->min = policy->cpuinfo.min_freq = min_freq;
 	policy->max = policy->cpuinfo.max_freq = max_freq;

+	if (max_freq > cpuinfo_max_freq_cached)
+		cpuinfo_max_freq_cached = max_freq;
+
 	if (policy->min == ~0)
 		return -EINVAL;
 	else

include/linux/cpufreq.h

@@ -1000,4 +1000,6 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
 void cpufreq_generic_init(struct cpufreq_policy *policy,
 		struct cpufreq_frequency_table *table,
 		unsigned int transition_latency);
+
+extern unsigned int cpuinfo_max_freq_cached;
 #endif /* _LINUX_CPUFREQ_H */

include/linux/sched/sysctl.h

@@ -101,4 +101,9 @@ extern int sched_energy_aware_handler(struct ctl_table *table, int write,
 				 loff_t *ppos);
 #endif

+#define LIB_PATH_LENGTH 512
+extern char sched_lib_name[LIB_PATH_LENGTH];
+extern unsigned int sched_lib_mask_force;
+extern bool is_sched_lib_based_app(pid_t pid);
+
 #endif /* _LINUX_SCHED_SYSCTL_H */
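Not part of the diff: the header exports the knobs and the query helper to other kernel code. An illustrative caller (the helper name below is hypothetical) mirrors should_use_cached_freq() from the cpufreq hunk above; the comments spell out the semantics of each check.

/* Illustrative sketch, not part of the patch. */
#include <linux/bits.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>

/* Hypothetical helper: should a per-CPU attribute be spoofed for the
 * task currently reading it? */
static bool spoof_for_current(int cpu)
{
        /* sched_lib_mask_force is a bitmask of CPU numbers, e.g. 0xf0
         * selects CPUs 4-7. */
        if (!(BIT(cpu) & sched_lib_mask_force))
                return false;

        /* True when current has an executable mapping whose path
         * contains the sched_lib_name substring. */
        return is_sched_lib_based_app(current->pid);
}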

kernel/sched/core.c

@@ -5474,6 +5474,60 @@ out_put_task:
 	return retval;
 }

+char sched_lib_name[LIB_PATH_LENGTH];
+unsigned int sched_lib_mask_force;
+
+bool is_sched_lib_based_app(pid_t pid)
+{
+        const char *name = NULL;
+        struct vm_area_struct *vma;
+        char path_buf[LIB_PATH_LENGTH];
+        bool found = false;
+        struct task_struct *p;
+        struct mm_struct *mm;
+
+        if (strnlen(sched_lib_name, LIB_PATH_LENGTH) == 0)
+                return false;
+
+        rcu_read_lock();
+        p = find_process_by_pid(pid);
+        if (!p) {
+                rcu_read_unlock();
+                return false;
+        }
+
+        /* Prevent p going away */
+        get_task_struct(p);
+        rcu_read_unlock();
+
+        mm = get_task_mm(p);
+        if (!mm)
+                goto put_task_struct;
+
+        down_read(&mm->mmap_sem);
+        for (vma = mm->mmap; vma ; vma = vma->vm_next) {
+                if (vma->vm_file && vma->vm_flags & VM_EXEC) {
+                        name = d_path(&vma->vm_file->f_path,
+                                        path_buf, LIB_PATH_LENGTH);
+                        if (IS_ERR(name))
+                                goto release_sem;
+
+                        if (strnstr(name, sched_lib_name,
+                                        strnlen(name, LIB_PATH_LENGTH))) {
+                                found = true;
+                                break;
+                        }
+                }
+        }
+
+release_sem:
+        up_read(&mm->mmap_sem);
+        mmput(mm);
+put_task_struct:
+        put_task_struct(p);
+        return found;
+}
+
 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
                              struct cpumask *new_mask)
 {
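Not part of the diff: is_sched_lib_based_app() reports true for any task that holds a file-backed VM_EXEC mapping whose d_path() contains the sched_lib_name substring. A userspace sketch of how such a mapping comes about (the library path is a placeholder; in practice a plain dlopen() of the library creates the same kind of mapping):

/* Sketch: create the kind of mapping the VMA walk above looks for. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/vendor/lib64/libexample.so", O_RDONLY); /* placeholder path */
        void *p;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* A file-backed mapping with PROT_EXEC is what the kernel checks. */
        p = mmap(NULL, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        pause(); /* hold the mapping while the check can observe it */
        return 0;
}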

kernel/sysctl.c

@@ -139,6 +139,9 @@ static int ten_thousand = 10000;
 #ifdef CONFIG_PERF_EVENTS
 static int six_hundred_forty_kb = 640 * 1024;
 #endif
+#ifdef CONFIG_SCHED_WALT
+static int two_hundred_fifty_five = 255;
+#endif

 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -499,6 +502,24 @@ static struct ctl_table kern_table[] = {
 		.extra2		= SYSCTL_ONE,
 	},
 #endif
+#ifdef CONFIG_SCHED_WALT
+	{
+		.procname	= "sched_lib_name",
+		.data		= sched_lib_name,
+		.maxlen		= LIB_PATH_LENGTH,
+		.mode		= 0644,
+		.proc_handler	= proc_dostring,
+	},
+	{
+		.procname	= "sched_lib_mask_force",
+		.data		= &sched_lib_mask_force,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &two_hundred_fifty_five,
+	},
+#endif
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.procname	= "prove_locking",