Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "31 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (31 commits)
  ocfs2: fix potential use after free
  mm/khugepaged: fix the xas_create_range() error path
  mm/khugepaged: collapse_shmem() do not crash on Compound
  mm/khugepaged: collapse_shmem() without freezing new_page
  mm/khugepaged: minor reorderings in collapse_shmem()
  mm/khugepaged: collapse_shmem() remember to clear holes
  mm/khugepaged: fix crashes due to misaccounted holes
  mm/khugepaged: collapse_shmem() stop if punched or truncated
  mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
  mm/huge_memory: splitting set mapping+index before unfreeze
  mm/huge_memory: rename freeze_page() to unmap_page()
  initramfs: clean old path before creating a hardlink
  kernel/kcov.c: mark funcs in __sanitizer_cov_trace_pc() as notrace
  psi: make disabling/enabling easier for vendor kernels
  proc: fixup map_files test on arm
  debugobjects: avoid recursive calls with kmemleak
  userfaultfd: shmem: UFFDIO_COPY: set the page dirty if VM_WRITE is not set
  userfaultfd: shmem: add i_size checks
  userfaultfd: shmem/hugetlbfs: only allow to register VM_MAYWRITE vmas
  userfaultfd: shmem: allocate anonymous memory for MAP_PRIVATE shmem
  ...
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -56,7 +56,7 @@ struct kcov {
 	struct task_struct	*t;
 };
 
-static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
 	unsigned int mode;
 
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 	return mode == needed_mode;
 }
 
-static unsigned long canonicalize_ip(unsigned long ip)
+static notrace unsigned long canonicalize_ip(unsigned long ip)
 {
 #ifdef CONFIG_RANDOMIZE_BASE
 	ip -= kaslr_offset();
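Both helpers changed above are called from __sanitizer_cov_trace_pc(), the hook the compiler emits into every instrumented function, and that hook is already notrace; if its callees are not notrace as well, the function tracer can recurse through them. The kernel's notrace marker boils down to a compiler attribute (no_instrument_function). The sketch below reproduces the same recursion constraint in plain userspace C with GCC's -finstrument-functions hooks; the file name, build line, and messages are illustrative only, not taken from the kernel.

/* sketch.c - build with: gcc -finstrument-functions -o sketch sketch.c
 * Why a function called from an instrumentation hook must itself be
 * excluded from instrumentation, analogous to the kernel's notrace. */
#include <stdio.h>

#define notrace __attribute__((no_instrument_function))

/* GCC calls these hooks on entry/exit of every instrumented function.
 * Without the attribute, the hooks would be instrumented too and every
 * call would recurse straight back into them. */
notrace void __cyg_profile_func_enter(void *fn, void *call_site)
{
	fprintf(stderr, "enter %p from %p\n", fn, call_site);
}

notrace void __cyg_profile_func_exit(void *fn, void *call_site)
{
	fprintf(stderr, "exit  %p\n", fn);
}

int main(void)
{
	puts("hello");	/* main() itself is instrumented and logged */
	return 0;
}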
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -136,8 +136,18 @@
 
 static int psi_bug __read_mostly;
 
-bool psi_disabled __read_mostly;
-core_param(psi_disabled, psi_disabled, bool, 0644);
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+bool psi_enable;
+#else
+bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+	return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
 
 /* Running averages - we need to be higher-res than loadavg */
 #define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
 
 void __init psi_init(void)
 {
-	if (psi_disabled)
+	if (!psi_enable) {
+		static_branch_enable(&psi_disabled);
 		return;
+	}
 
 	psi_period = jiffies_to_nsecs(PSI_FREQ);
 	group_init(&psi_system);
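Taken together, the two hunks above turn PSI disabling into a boot-time decision: setup_psi() parses the psi= parameter with kstrtobool(), psi_init() flips the psi_disabled static key once via static_branch_enable() when PSI ends up disabled, and every later check goes through static_branch_likely(), which jump labels compile into a runtime-patched branch rather than a load and compare. The following is a plain-C sketch of that control flow only; the stand-in names (psi_disabled_key, branch_enable(), branch_check(), the fake command line) are illustrative and deliberately not the kernel's jump-label API.

/* Plain-C analogue of the psi_disabled static-key flow (illustration only;
 * the kernel uses DEFINE_STATIC_KEY_FALSE/static_branch_enable/
 * static_branch_likely, which patch the branch site at runtime). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool psi_disabled_key;	/* stands in for DEFINE_STATIC_KEY_FALSE(psi_disabled) */
static bool psi_enable = true;	/* default when CONFIG_PSI_DEFAULT_DISABLED is unset */

static void branch_enable(bool *key) { *key = true; }		/* ~ static_branch_enable() */
static bool branch_check(const bool *key) { return *key; }	/* ~ static_branch_likely() */

/* ~ __setup("psi=", setup_psi) + kstrtobool(), only handling "0" and "1" */
static void parse_cmdline(const char *cmdline)
{
	if (strstr(cmdline, "psi=0"))
		psi_enable = false;
	else if (strstr(cmdline, "psi=1"))
		psi_enable = true;
}

static void psi_init(void)
{
	if (!psi_enable)
		branch_enable(&psi_disabled_key);	/* flipped once, at boot */
}

static void psi_task_tick(void)
{
	if (branch_check(&psi_disabled_key))
		return;		/* disabled: every hook bails out immediately */
	puts("accounting pressure stall time");
}

int main(void)
{
	parse_cmdline("root=/dev/vda1 psi=0");
	psi_init();
	psi_task_tick();	/* prints nothing: PSI was disabled at "boot" */
	return 0;
}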
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	*flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
 #ifdef CONFIG_CGROUPS
 int psi_cgroup_alloc(struct cgroup *cgroup)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return 0;
 
 	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
 
 void psi_cgroup_free(struct cgroup *cgroup)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -637,7 +649,7 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (psi_disabled) {
+	if (static_branch_likely(&psi_disabled)) {
 		/*
 		 * Lame to do this here, but the scheduler cannot be locked
 		 * from the outside, so we move cgroups from inside sched/.
@@ -673,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 {
 	int full;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return -EOPNOTSUPP;
 
 	update_stats(group);
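One user-visible consequence of the psi_show() change above: while the psi_disabled key is set, reads of the PSI interface fail with EOPNOTSUPP instead of returning pressure statistics. Assuming the standard /proc/pressure/memory file exported by PSI, a minimal userspace probe could look like this:

/* psi_check.c - report whether PSI is usable on the running kernel.
 * Assumes the standard /proc/pressure/memory interface file. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/proc/pressure/memory", O_RDONLY);

	if (fd < 0) {
		printf("no PSI interface: %s\n", strerror(errno));
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		/* EOPNOTSUPP here matches psi_show() bailing out when the
		 * psi_disabled static key is set, e.g. after booting with psi=0 */
		printf("PSI disabled: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	buf[n] = '\0';
	fputs(buf, stdout);	/* "some avg10=..." / "full avg10=..." lines */
	close(fd);
	return 0;
}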
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 {
 	int clear = 0, set = TSK_RUNNING;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
 	int clear = TSK_RUNNING, set = 0;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 
 static inline void psi_ttwu_dequeue(struct task_struct *p)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 	/*
 	 * Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 
 static inline void psi_task_tick(struct rq *rq)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (unlikely(rq->curr->flags & PF_MEMSTALL))