Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

The MSCC bug fix in 'net' had to be slightly adjusted because the
register accesses are done differently in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 2020-05-24 13:47:27 -07:00
422 changed files with 3508 additions and 1711 deletions

kernel/bpf/syscall.c

@@ -627,9 +627,20 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 	mutex_lock(&map->freeze_mutex);
 
-	if ((vma->vm_flags & VM_WRITE) && map->frozen) {
-		err = -EPERM;
-		goto out;
+	if (vma->vm_flags & VM_WRITE) {
+		if (map->frozen) {
+			err = -EPERM;
+			goto out;
+		}
+		/* map is meant to be read-only, so do not allow mapping as
+		 * writable, because it's possible to leak a writable page
+		 * reference and allows user-space to still modify it after
+		 * freezing, while verifier will assume contents do not change
+		 */
+		if (map->map_flags & BPF_F_RDONLY_PROG) {
+			err = -EACCES;
+			goto out;
+		}
 	}
 
 	/* set default open/close callbacks */
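
For illustration, a minimal user-space sketch (not part of this commit) of the case the new check rejects: an array map created with BPF_F_MMAPABLE | BPF_F_RDONLY_PROG can still be mapped read-only, but mapping it writable should now fail with EACCES. This assumes recent UAPI headers and enough privilege to create maps (e.g. CAP_SYS_ADMIN, or kernel.unprivileged_bpf_disabled=0):

#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	union bpf_attr attr;
	void *ro, *rw;
	int fd;

	/* Array map that is mmapable but read-only for BPF programs. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 1;
	attr.map_flags = BPF_F_MMAPABLE | BPF_F_RDONLY_PROG;

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* A read-only mapping is still allowed... */
	ro = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	printf("PROT_READ:  %s\n", ro == MAP_FAILED ? strerror(errno) : "ok");

	/* ...but a writable mapping of an RDONLY_PROG map now fails with
	 * EACCES instead of leaking a writable page reference.
	 */
	rw = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("PROT_WRITE: %s\n", rw == MAP_FAILED ? strerror(errno) : "ok");
	return 0;
}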

kernel/sched/debug.c

@@ -948,8 +948,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P(se.avg.util_est.enqueued);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
-	__PS("uclamp.min", p->uclamp[UCLAMP_MIN].value);
-	__PS("uclamp.max", p->uclamp[UCLAMP_MAX].value);
+	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
+	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
 	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
 	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
 #endif
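
For context: p->uclamp_req[] holds the clamp values the task itself requested, while uclamp_eff_value() folds in cgroup and system limits, so before this fix the "uclamp.min"/"uclamp.max" lines in /proc/<pid>/sched echoed effective rather than requested values. A minimal sketch (not part of this commit) of setting the requested clamps via sched_setattr(); glibc has no wrapper, so the struct is declared locally as in the sched_setattr(2) man page, and the 128/512 values are purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SCHED_FLAG_UTIL_CLAMP_MIN	0x20
#define SCHED_FLAG_UTIL_CLAMP_MAX	0x40

struct sched_attr {			/* layout as in sched_setattr(2) */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* SCHED_DEADLINE fields, unused here */
	uint64_t sched_deadline;
	uint64_t sched_period;
	uint32_t sched_util_min;
	uint32_t sched_util_max;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_flags = SCHED_FLAG_UTIL_CLAMP_MIN |
			   SCHED_FLAG_UTIL_CLAMP_MAX;
	attr.sched_util_min = 128;	/* requested clamps, range 0..1024 */
	attr.sched_util_max = 512;

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	/* With the fix, "uclamp.min"/"uclamp.max" in /proc/self/sched show
	 * the requested 128/512, while the "effective" lines still reflect
	 * cgroup and system clamping.
	 */
	printf("requested clamps set; see /proc/%d/sched\n", getpid());
	pause();			/* keep the task alive for inspection */
	return 0;
}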

kernel/sched/fair.c

@@ -4773,7 +4773,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	int enqueue = 1;
 	long task_delta, idle_task_delta;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4797,26 +4796,44 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
-			enqueue = 0;
+			break;
 
 		cfs_rq = cfs_rq_of(se);
-		if (enqueue) {
-			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
-		} else {
-			update_load_avg(cfs_rq, se, 0);
-			se_update_runnable(se);
-		}
+		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+
 		cfs_rq->h_nr_running += task_delta;
 		cfs_rq->idle_h_nr_running += idle_task_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
-			break;
+			goto unthrottle_throttle;
 	}
 
-	if (!se)
-		add_nr_running(rq, task_delta);
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		update_load_avg(cfs_rq, se, UPDATE_TG);
+		se_update_runnable(se);
+
+		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
+
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto unthrottle_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
+	}
+
+	/* At this point se is NULL and we are at root level*/
+	add_nr_running(rq, task_delta);
 
+unthrottle_throttle:
 	/*
 	 * The cfs_rq_throttled() breaks in the above iteration can result in
 	 * incomplete leaf list maintenance, resulting in triggering the
@@ -4825,7 +4842,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
-		list_add_leaf_cfs_rq(cfs_rq);
+		if (list_add_leaf_cfs_rq(cfs_rq))
+			break;
 	}
 
 	assert_list_leaf_cfs_rq(rq);
@@ -5478,6 +5496,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			goto enqueue_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
 enqueue_throttle:
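
As a hedged aside (not part of this commit), the patched paths only run when a cfs_rq actually gets throttled and later unthrottled. A rough user-space sketch of provoking that with a nested cgroup2 hierarchy and a tight CPU bandwidth budget; it assumes cgroup2 is mounted at /sys/fs/cgroup with the cpu controller enabled, requires root, and the "ttest" names and 10ms/100ms budget are made up for the example:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void write_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		exit(1);
	}
	close(fd);
}

int main(void)
{
	char buf[64];

	/* Parent group limited to 10ms of CPU per 100ms period; enabling
	 * +cpu in its subtree_control gives the child its own cfs_rq level,
	 * so throttling walks a nested hierarchy as in the hunks above.
	 */
	mkdir("/sys/fs/cgroup/ttest", 0755);
	write_file("/sys/fs/cgroup/ttest/cpu.max", "10000 100000");
	write_file("/sys/fs/cgroup/ttest/cgroup.subtree_control", "+cpu");
	mkdir("/sys/fs/cgroup/ttest/child", 0755);

	snprintf(buf, sizeof(buf), "%d", getpid());
	write_file("/sys/fs/cgroup/ttest/child/cgroup.procs", buf);

	/* Burn CPU: the group is throttled every period and unthrottled at
	 * each refill. Stop with Ctrl-C and rmdir the groups to clean up.
	 */
	for (;;)
		;
	return 0;
}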