Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ieee802154/fakehard.c

A bug fix went into 'net' for ieee802154/fakehard.c, which is removed
in 'net-next'.

Add build fix into the merge from Stephen Rothwell in openvswitch, the
logging macros take a new initial 'log' argument, a new call was added
in 'net' so when we merge that in here we have to explicitly add the
new 'log' arg to it else the build fails.

Signed-off-by: David S. Miller <davem@davemloft.net>
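The openvswitch fixup described above amounts to adding the new leading 'log' argument at the call site that was merged in from 'net'. The sketch below is a minimal, self-contained illustration of that kind of fixup, assuming an OVS_NLERR-style macro that takes a leading 'log' flag; the macro body, function name and message text are illustrative stand-ins, not the actual openvswitch code.

/*
 * Minimal sketch of the merge fixup described above (assumed macro
 * shape, not the real openvswitch source).
 */
#include <stdbool.h>
#include <stdio.h>

/* net-next form: the first argument says whether logging is allowed. */
#define OVS_NLERR(log, fmt, ...)					\
do {									\
	if (log)							\
		fprintf(stderr, "netlink: " fmt "\n", ##__VA_ARGS__);	\
} while (0)

static int parse_flow_attr(bool log)
{
	/*
	 * A call merged in from 'net' would still read OVS_NLERR("...")
	 * and fail to build; the merge resolution adds the new leading
	 * 'log' argument explicitly, as the commit message explains.
	 */
	OVS_NLERR(log, "Flow key attribute not present in new flow.");
	return -1;
}

int main(void)
{
	return parse_flow_attr(true) == -1 ? 0 : 1;
}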
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 	if (!task) {
 		/*
-		 * Per cpu events are removed via an smp call and
-		 * the removal is always successful.
+		 * Per cpu events are removed via an smp call. The removal can
+		 * fail if the CPU is currently offline, but in that case we
+		 * already called __perf_remove_from_context from
+		 * perf_event_exit_cpu.
 		 */
 		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 		return;
@@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
-	struct remove_event re = { .detach_group = false };
+	struct remove_event re = { .detach_group = true };
 	struct perf_event_context *ctx = __info;
 
 	perf_pmu_rotate_stop(ctx->pmu);
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
 
 static int platform_suspend_prepare_late(suspend_state_t state)
 {
-	return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
+	return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
 		freeze_ops->prepare() : 0;
 }
 
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
 
 static void platform_resume_early(suspend_state_t state)
 {
-	if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
+	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
 		freeze_ops->restore();
 }
 
@@ -2474,44 +2474,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
-/*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-	u64 ns = 0;
-
-	/*
-	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
-	 * project cycles that may never be accounted to this
-	 * thread, breaking clock_gettime().
-	 */
-	if (task_current(rq, p) && task_on_rq_queued(p)) {
-		update_rq_clock(rq);
-		ns = rq_clock_task(rq) - p->se.exec_start;
-		if ((s64)ns < 0)
-			ns = 0;
-	}
-
-	return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns = 0;
-
-	rq = task_rq_lock(p, &flags);
-	ns = do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
 	unsigned long flags;
 	struct rq *rq;
-	u64 ns = 0;
+	u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 	/*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+	/*
+	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
+	 * project cycles that may never be accounted to this
+	 * thread, breaking clock_gettime().
+	 */
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
+		update_rq_clock(rq);
+		p->sched_class->update_curr(rq);
+	}
+	ns = p->se.sum_exec_runtime;
 	task_rq_unlock(rq, p, &flags);
 
 	return ns;
@@ -6368,6 +6339,10 @@ static void sched_init_numa(void)
 		if (!sched_debug())
 			break;
 	}
+
+	if (!level)
+		return;
+
 	/*
 	 * 'level' contains the number of unique distances, excluding the
 	 * identity distance node_distance(i,i).
@@ -7444,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		put_prev_task(rq, tsk);
 
-	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-				lockdep_is_held(&tsk->sighand->siglock)),
+	/*
+	 * All callers are synchronized by task_rq_lock(); we do not use RCU
+	 * which is pointless here. Thus, we pass "true" to task_css_check()
+	 * to prevent lockdep warnings.
+	 */
+	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
 	tsk->sched_task_group = tg;
@@ -1701,4 +1701,6 @@ const struct sched_class dl_sched_class = {
 	.prio_changed = prio_changed_dl,
 	.switched_from = switched_from_dl,
 	.switched_to = switched_to_dl,
+
+	.update_curr = update_curr_dl,
 };
@@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+	update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -1179,6 +1184,13 @@ static void task_numa_compare(struct task_numa_env *env,
 		cur = NULL;
 	raw_spin_unlock_irq(&dst_rq->lock);
 
+	/*
+	 * Because we have preemption enabled we can get migrated around and
+	 * end try selecting ourselves (current == env->p) as a swap candidate.
+	 */
+	if (cur == env->p)
+		goto unlock;
+
 	/*
 	 * "imp" is the fault differential for the source task between the
 	 * source and destination node. Calculate the total differential for
@@ -7949,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
 	.get_rr_interval = get_rr_interval_fair,
 
+	.update_curr = update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.task_move_group = task_move_group_fair,
 #endif
@@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = {
 
 	.prio_changed = prio_changed_rt,
 	.switched_to = switched_to_rt,
+
+	.update_curr = update_curr_rt,
 };
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -1135,6 +1135,8 @@ struct sched_class {
 	unsigned int (*get_rr_interval) (struct rq *rq,
 					 struct task_struct *task);
 
+	void (*update_curr) (struct rq *rq);
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
@@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 		*sample = cputime_to_expires(cputime.utime);
 		break;
 	case CPUCLOCK_SCHED:
-		*sample = cputime.sum_exec_runtime + task_delta_exec(p);
+		*sample = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
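Taken together, the scheduler hunks above (apparently kernel/sched/core.c, fair.c, rt.c, deadline.c, sched.h and kernel/time/posix-cpu-timers.c, judging by the function names) drop the do_task_delta_exec()/task_delta_exec() helpers and add a sched_class->update_curr() hook that each class implements, so task_sched_runtime() first asks the class to fold any pending runtime into sum_exec_runtime and then reads it. The sketch below models that pattern in plain userspace C as a rough aid to reading the diff; the types, locking and accounting are heavily simplified assumptions, not the kernel code.

/*
 * Simplified model of the ->update_curr() pattern introduced above.
 * All names and fields are cut down for illustration only.
 */
#include <stdio.h>

struct rq;

struct sched_class {
	void (*update_curr)(struct rq *rq);	/* the new hook */
};

struct task_struct {
	const struct sched_class *sched_class;
	unsigned long long sum_exec_runtime;
	unsigned long long exec_start;
};

struct rq {
	struct task_struct *curr;
	unsigned long long clock_task;
};

/* stand-in for update_curr_fair(): charge not-yet-accounted cycles */
static void update_curr_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	curr->sum_exec_runtime += rq->clock_task - curr->exec_start;
	curr->exec_start = rq->clock_task;
}

static const struct sched_class fair_sched_class = {
	.update_curr = update_curr_fair,
};

/* simplified task_sched_runtime(): update first, then read the sum */
static unsigned long long task_sched_runtime(struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		p->sched_class->update_curr(rq);
	return p->sum_exec_runtime;
}

int main(void)
{
	struct task_struct t = { .sched_class = &fair_sched_class, .exec_start = 100 };
	struct rq rq = { .curr = &t, .clock_task = 1000 };

	/* the 900 units of pending runtime are folded in before reporting */
	printf("%llu\n", task_sched_runtime(&rq, &t));
	return 0;
}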