rcu: Remove "cpu" argument to rcu_note_context_switch()
The "cpu" argument to rcu_note_context_switch() is always the current CPU, so drop it. This in turn allows the "cpu" argument to rcu_preempt_note_context_switch() to be removed, which allows the sole use of "cpu" in both functions to be replaced with a this_cpu_ptr(). Again, the anticipated cross-CPU uses of these functions have been replaced by NO_HZ_FULL. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
This commit is contained in:
@@ -78,7 +78,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 	call_rcu(head, func);
 }
 
-static inline void rcu_note_context_switch(int cpu)
+static inline void rcu_note_context_switch(void)
 {
 	rcu_sched_qs();
 }
@@ -30,7 +30,7 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
-void rcu_note_context_switch(int cpu);
+void rcu_note_context_switch(void);
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
@@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void);
  */
 static inline void rcu_virt_note_context_switch(int cpu)
 {
-	rcu_note_context_switch(cpu);
+	rcu_note_context_switch();
 }
 
 void synchronize_rcu_bh(void);
@@ -286,11 +286,11 @@ static void rcu_momentary_dyntick_idle(void)
  * and requires special handling for preemptible RCU.
  * The caller must have disabled preemption.
  */
-void rcu_note_context_switch(int cpu)
+void rcu_note_context_switch(void)
 {
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs();
-	rcu_preempt_note_context_switch(cpu);
+	rcu_preempt_note_context_switch();
 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
 	trace_rcu_utilization(TPS("End context switch"));
@@ -547,7 +547,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
-static void rcu_preempt_note_context_switch(int cpu);
+static void rcu_preempt_note_context_switch(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
@@ -156,7 +156,7 @@ static void rcu_preempt_qs(void)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
 	unsigned long flags;
@@ -167,7 +167,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	    !t->rcu_read_unlock_special.b.blocked) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+		rdp = this_cpu_ptr(rcu_preempt_state.rda);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		smp_mb__after_unlock_lock();
@@ -945,7 +945,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 }
 
@@ -2802,7 +2802,7 @@ need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_note_context_switch(cpu);
+	rcu_note_context_switch();
 	prev = rq->curr;
 
 	schedule_debug(prev);
@@ -656,7 +656,7 @@ static void run_ksoftirqd(unsigned int cpu)
	 * in the task stack here.
	 */
 	__do_softirq();
-	rcu_note_context_switch(cpu);
+	rcu_note_context_switch();
 	local_irq_enable();
 	cond_resched();
 	return;
Reference in New Issue
Block a user