Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor conflict in drivers/s390/net/qeth_l2_main.c, kept the lock from commit c8183f5489
("s390/qeth: fix potential deadlock on workqueue flush"), removed the code which was removed by commit 9897d583b0
("s390/qeth: consolidate some duplicated HW cmd code").

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
@@ -678,8 +678,10 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
 	down_write(&bpf_devs_lock);
 	if (!offdevs_inited) {
 		err = rhashtable_init(&offdevs, &offdevs_params);
-		if (err)
+		if (err) {
+			up_write(&bpf_devs_lock);
 			return ERR_PTR(err);
+		}
 		offdevs_inited = true;
 	}
 	up_write(&bpf_devs_lock);
@@ -1708,11 +1708,11 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
 /*
  * Poll support for process exit notification.
  */
-static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
 {
 	struct task_struct *task;
 	struct pid *pid = file->private_data;
-	int poll_flags = 0;
+	__poll_t poll_flags = 0;
 
 	poll_wait(file, &pid->wait_pidfd, pts);
 
@@ -1724,7 +1724,7 @@ static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
 	 * group, then poll(2) should block, similar to the wait(2) family.
 	 */
 	if (!task || (task->exit_state && thread_group_empty(task)))
-		poll_flags = POLLIN | POLLRDNORM;
+		poll_flags = EPOLLIN | EPOLLRDNORM;
 	rcu_read_unlock();
 
 	return poll_flags;
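The two pidfd_poll() hunks above only adjust kernel-internal typing: the handler now returns __poll_t and uses the matching EPOLL* flag constants. For context, a minimal userspace sketch of the interface this implements, waiting for a child's exit by polling its pidfd (assumes SYS_pidfd_open is available, i.e. Linux 5.3+ headers; error handling trimmed; not part of the patch above):

/* Sketch: detect child exit by polling a pidfd (illustrative only). */
#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {			/* child: exit after a moment */
		sleep(1);
		_exit(0);
	}

	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

	/* POLLIN is reported once the whole thread group has exited,
	 * matching the pidfd_poll() logic in the hunk above. */
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
		printf("child %d exited\n", (int)pid);

	waitpid(pid, NULL, 0);		/* reap the child */
	close(pidfd);
	return 0;
}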
@@ -814,6 +814,8 @@ EXPORT_SYMBOL_GPL(freq_qos_update_request);
  */
 int freq_qos_remove_request(struct freq_qos_request *req)
 {
+	int ret;
+
 	if (!req)
 		return -EINVAL;
 
@@ -821,7 +823,11 @@ int freq_qos_remove_request(struct freq_qos_request *req)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
 
-	return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	req->qos = NULL;
+	req->type = 0;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(freq_qos_remove_request);
 
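The freq_qos_remove_request() change above clears req->qos and req->type on removal, so the request object reads as inactive afterwards and an accidental double remove trips the "called for unknown object" warning instead of acting on stale state. A minimal sketch of the add/remove pairing this guards, in kernel-module style (the CPU 0 policy lookup and the 800000 kHz cap are illustrative assumptions, not taken from the patch):

/* Sketch only: pair freq_qos_add_request()/freq_qos_remove_request()
 * against the CPU 0 cpufreq policy constraints. Illustrative values. */
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_qos.h>

static struct freq_qos_request demo_req;	/* hypothetical request */

static int __init demo_init(void)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
	int ret;

	if (!policy)
		return -ENODEV;

	/* Cap CPU 0 at 800000 kHz (made-up value for the example). */
	ret = freq_qos_add_request(&policy->constraints, &demo_req,
				   FREQ_QOS_MAX, 800000);
	cpufreq_cpu_put(policy);

	return ret < 0 ? ret : 0;
}

static void __exit demo_exit(void)
{
	/*
	 * With the patch above, this leaves demo_req inactive
	 * (req->qos == NULL), so a stray second remove only warns.
	 */
	freq_qos_remove_request(&demo_req);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");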
@@ -1065,7 +1065,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
 	 * affecting a valid clamp bucket, the next time it's enqueued,
 	 * it will already see the updated clamp bucket value.
 	 */
-	if (!p->uclamp[clamp_id].active) {
+	if (p->uclamp[clamp_id].active) {
 		uclamp_rq_dec_id(rq, p, clamp_id);
 		uclamp_rq_inc_id(rq, p, clamp_id);
 	}
@@ -6019,10 +6019,11 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	__sched_fork(0, idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
-	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 	idle->flags |= PF_IDLE;
@@ -7547,6 +7547,19 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+	update_irq_load_avg(rq, 0);
+
+	/* Don't need periodic decay once load/util_avg are null */
+	if (others_have_blocked(rq))
+		done = false;
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
@@ -7574,14 +7587,6 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}
 
-	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-	update_irq_load_avg(rq, 0);
-	/* Don't need periodic decay once load/util_avg are null */
-	if (others_have_blocked(rq))
-		done = false;
-
 	update_blocked_load_status(rq, !done);
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7642,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
 	curr_class = rq->curr->sched_class;
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
+
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
 	rq_unlock_irqrestore(rq, &rf);
 }