Merge keystone/android12-5.10-keystone-qcom-release.81+ (2c95d7c) into msm-5.10

* refs/heads/tmp-2c95d7c:
  Revert "BACKPORT: FROMLIST: scsi: core: Reserve one tag for the UFS driver"
  UPSTREAM: binder: Add invalid handle info in user error log
  UPSTREAM: ARM: fix Thumb2 regression with Spectre BHB
  UPSTREAM: ARM: Spectre-BHB: provide empty stub for non-config
  UPSTREAM: ARM: fix build warning in proc-v7-bugs.c
  UPSTREAM: ARM: Do not use NOCROSSREFS directive with ld.lld
  UPSTREAM: ARM: fix co-processor register typo
  UPSTREAM: ARM: fix build error when BPF_SYSCALL is disabled
  UPSTREAM: ARM: include unprivileged BPF status in Spectre V2 reporting
  UPSTREAM: ARM: Spectre-BHB workaround
  UPSTREAM: ARM: use LOADADDR() to get load address of sections
  UPSTREAM: ARM: early traps initialisation
  UPSTREAM: ARM: report Spectre v2 status through sysfs
  UPSTREAM: x86/speculation: Warn about eIBRS + LFENCE + Unprivileged eBPF + SMT
  UPSTREAM: x86/speculation: Warn about Spectre v2 LFENCE mitigation
  UPSTREAM: x86/speculation: Update link to AMD speculation whitepaper
  UPSTREAM: x86/speculation: Use generic retpoline by default on AMD
  UPSTREAM: x86/speculation: Include unprivileged eBPF status in Spectre v2 mitigation reporting
  UPSTREAM: Documentation/hw-vuln: Update spectre doc
  UPSTREAM: x86/speculation: Add eIBRS + Retpoline options
  UPSTREAM: x86/speculation: Rename RETPOLINE_AMD to RETPOLINE_LFENCE
  UPSTREAM: x86,bugs: Unconditionally allow spectre_v2=retpoline,amd
  UPSTREAM: bpf: Add kconfig knob for disabling unpriv bpf by default
  ANDROID: dm-bow: Protect Ranges fetched and erased from the RB tree
  ANDROID: mm: page_pinner: fix build warning
  ANDROID: fault: Add vendor hook for TLB conflict
  BACKPORT: sched: Fix yet more sched_fork() races
  ANDROID: mm/slub: Fix Kasan issue with for_each_object_track
  ANDROID: dm kcopyd: Use reserved memory for the copy buffer
  ANDROID: GKI: add allowed list file for xiaomi
  ANDROID: GKI: Update symbols to symbol list
  FROMGIT: f2fs: quota: fix loop condition at f2fs_quota_sync()
  FROMGIT: f2fs: Restore rwsem lockdep support
  ANDROID: ABI: update allowed list for galaxy
  UPSTREAM: mac80211_hwsim: initialize ieee80211_tx_info at hw_scan_work
  ANDROID: GKI: remove vfs-only namespace from 2 symbols

Change-Id: I54118d206503f63d289dc825d51819e1e34540cc
Signed-off-by: Sivasri Kumar, Vanka <quic_svanka@quicinc.com>
Committed by Sivasri Kumar, Vanka on 2022-03-21 13:16:06 +05:30
48 changed files with 1445 additions and 596 deletions

kernel/bpf/syscall.c

@@ -52,7 +52,8 @@ static DEFINE_SPINLOCK(map_idr_lock);
 static DEFINE_IDR(link_idr);
 static DEFINE_SPINLOCK(link_idr_lock);
 
-int sysctl_unprivileged_bpf_disabled __read_mostly;
+int sysctl_unprivileged_bpf_disabled __read_mostly =
+	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
 
 static const struct bpf_map_ops * const bpf_map_types[] = {
 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
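For context, the knob is consumed at the top of the bpf(2) syscall; a minimal sketch of that check, based on the upstream sources and not part of this diff. The three values are: 0 = unprivileged BPF enabled, 1 = disabled and locked until reboot, 2 = disabled but re-enableable by an admin. With CONFIG_BPF_UNPRIV_DEFAULT_OFF built in, the default becomes 2 instead of 0.

/* Sketch of the consumer in kernel/bpf/syscall.c (context, not part of
 * this diff): any non-zero value blocks unprivileged callers.
 */
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;
	/* ... command dispatch ... */
}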

kernel/fork.c

@@ -2249,6 +2249,17 @@ static __latent_entropy struct task_struct *copy_process(
 	if (retval)
 		goto bad_fork_put_pidfd;
 
+	/*
+	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
+	 * the new task on the correct runqueue. All this *before* the task
+	 * becomes visible.
+	 *
+	 * This isn't part of ->can_fork() because while the re-cloning is
+	 * cgroup specific, it unconditionally needs to place the task on a
+	 * runqueue.
+	 */
+	sched_cgroup_fork(p, args);
+
 	/*
	 * From this point on we must avoid any synchronous user-space
	 * communication until we take the tasklist-lock. In particular, we do
@@ -2357,7 +2368,7 @@ static __latent_entropy struct task_struct *copy_process(
 	write_unlock_irq(&tasklist_lock);
 
 	proc_fork_connector(p);
-	sched_post_fork(p, args);
+	sched_post_fork(p);
 	cgroup_post_fork(p, args);
 	perf_event_fork(p);
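The net effect on copy_process() is the ordering outlined below (a simplified annotation, not compilable as-is; names taken from the diff): cgroup placement and runqueue selection now happen in sched_cgroup_fork(), before the new task is hashed and becomes visible, so a concurrent cgroup migration can no longer observe a half-placed task.

/*
 * Simplified outline of the fork path after this change (error paths
 * and unrelated setup omitted):
 *
 *   copy_process()
 *     sched_fork(clone_flags, p);       // prio/weight setup, no cgroup access
 *     cgroup_can_fork(p, args);         // pins the cgroup set in args->cset
 *     sched_cgroup_fork(p, args);       // task_group + runqueue, pre-visibility
 *     ... pid hash / tasklist_lock ...  // task becomes visible here
 *     sched_post_fork(p);               // reduced to uclamp_post_fork()
 *     cgroup_post_fork(p, args);
 */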

kernel/sched/core.c

@@ -880,9 +880,8 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-static void set_load_weight(struct task_struct *p)
+static void set_load_weight(struct task_struct *p, bool update_load)
 {
-	bool update_load = !(READ_ONCE(p->state) & TASK_NEW);
 	int prio = p->static_prio - MAX_RT_PRIO;
 	struct load_weight *load = &p->se.load;
 
@@ -3485,7 +3484,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		p->static_prio = NICE_TO_PRIO(0);
 
 	p->prio = p->normal_prio = p->static_prio;
-	set_load_weight(p);
+	set_load_weight(p, false);
 
 	/*
	 * We don't need the reset flag anymore after the fork. It has
@@ -3504,6 +3503,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	init_entity_runnable_average(&p->se);
 
+	trace_android_rvh_finish_prio_fork(p);
 #ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -3519,18 +3519,24 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	return 0;
 }
 
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 {
 	unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
-	struct task_group *tg;
-#endif
 
+	/*
+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+	 * required yet, but lockdep gets upset if rules are violated.
+	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_CGROUP_SCHED
-	tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
-			  struct task_group, css);
-	p->sched_task_group = autogroup_task_group(p, tg);
+	if (1) {
+		struct task_group *tg;
+
+		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+				  struct task_group, css);
+		tg = autogroup_task_group(p, tg);
+		p->sched_task_group = tg;
+	}
 #endif
 	rseq_migrate(p);
 	/*
@@ -3541,7 +3547,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
 
+void sched_post_fork(struct task_struct *p)
+{
 	uclamp_post_fork(p);
 }
 
@@ -5253,7 +5262,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		put_prev_task(rq, p);
 
 	p->static_prio = NICE_TO_PRIO(nice);
-	set_load_weight(p);
+	set_load_weight(p, true);
 	old_prio = p->prio;
 	p->prio = effective_prio(p);
 
@@ -5427,7 +5436,7 @@ static void __setscheduler_params(struct task_struct *p,
 	 */
 	p->rt_priority = attr->sched_priority;
 	p->normal_prio = normal_prio(p);
-	set_load_weight(p);
+	set_load_weight(p, true);
 }
 
 /*
@@ -7570,7 +7579,7 @@ void __init sched_init(void)
 		atomic_set(&rq->nr_iowait, 0);
 	}
 
-	set_load_weight(&init_task);
+	set_load_weight(&init_task, false);
 
 	/*
	 * The boot idle thread does lazy MMU switching as well:
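For reference, the body of set_load_weight() after this change, reproduced from the upstream patch (the ACK tree may differ around vendor hooks). The fix replaces the old inference via READ_ONCE(p->state) & TASK_NEW, which raced with the fork path, with an explicit update_load argument supplied by each caller: false on the fork/init paths where the task has never been enqueued, true when an existing task changes weight.

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/* SCHED_IDLE tasks get minimal weight: */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight; a forking task is not on a runqueue yet, so callers on
	 * the fork path pass update_load == false.
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}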

kernel/sysctl.c

@@ -236,7 +236,34 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
 	mutex_unlock(&bpf_stats_enabled_mutex);
 	return ret;
 }
-#endif
+
+void __weak unpriv_ebpf_notify(int new_state)
+{
+}
+
+static int bpf_unpriv_handler(struct ctl_table *table, int write,
+			      void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret, unpriv_enable = *(int *)table->data;
+	bool locked_state = unpriv_enable == 1;
+	struct ctl_table tmp = *table;
+
+	if (write && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	tmp.data = &unpriv_enable;
+	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+	if (write && !ret) {
+		if (locked_state && unpriv_enable != 1)
+			return -EPERM;
+		*(int *)table->data = unpriv_enable;
+	}
+
+	unpriv_ebpf_notify(unpriv_enable);
+
+	return ret;
+}
+
+#endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
 
 /*
  * /proc/sys support
@@ -2629,10 +2656,9 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_unprivileged_bpf_disabled,
 		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
 		.mode		= 0644,
-		/* only handle a transition from default "0" to "1" */
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= SYSCTL_ONE,
-		.extra2		= SYSCTL_ONE,
+		.proc_handler	= bpf_unpriv_handler,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &two,
 	},
 	{
 		.procname	= "bpf_stats_enabled",
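A hypothetical userspace probe (not part of the patch) that demonstrates the one-way lock enforced by bpf_unpriv_handler: writes of 0 or 2 are accepted while the knob is not locked, but once 1 is written, any attempt to change the value is rejected with EPERM until reboot. Run as root on a kernel carrying this change; with CONFIG_BPF_UNPRIV_DEFAULT_OFF=y the knob starts at 2.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a value to the sysctl knob; returns 0 on success, -1 on error. */
static int write_knob(const char *val)
{
	int fd = open("/proc/sys/kernel/unprivileged_bpf_disabled", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	printf("write 0: %s\n", write_knob("0") ? "rejected" : "ok"); /* 2 -> 0: allowed */
	printf("write 1: %s\n", write_knob("1") ? "rejected" : "ok"); /* 0 -> 1: allowed, locks knob */
	printf("write 0: %s\n", write_knob("0") ? "rejected" : "ok"); /* 1 -> 0: EPERM, locked */
	return 0;
}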