sparc32: move smp ipi to method ops
I ended up renaming set_cpu_int to send_ipi to be consistent all the way
around. send_ipi was moved to the *_smp.c files so we can call the
relevant method directly, without any _ops indirection.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4ba22b16bb
parent c68e5d39a5
committed by David S. Miller
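For context, here is a minimal sketch of the method-ops pattern the message describes, assuming an ops struct whose fields match the call sites in the diff below (resched, single, mask_one). The leon_* names and the init hook are hypothetical placeholders for illustration, not code from this commit:

/* Sketch only: the fields below are inferred from the call sites in this
 * diff (resched/single/mask_one); the real struct may carry more ops. */
struct sparc32_ipi_ops {
	void (*resched)(int cpu);
	void (*single)(int cpu);
	void (*mask_one)(int cpu);
};

extern const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* Each platform *_smp.c keeps its own send_ipi() and calls it directly,
 * so no _ops indirection is needed inside the platform file.  The bodies
 * are stubbed out here. */
static void leon_ipi_resched(int cpu)  { /* leon_send_ipi(cpu, resched_irq); */ }
static void leon_ipi_single(int cpu)   { /* leon_send_ipi(cpu, single_irq);  */ }
static void leon_ipi_mask_one(int cpu) { /* leon_send_ipi(cpu, mask_irq);    */ }

static const struct sparc32_ipi_ops leon_ipi_ops = {
	.resched  = leon_ipi_resched,
	.single   = leon_ipi_single,
	.mask_one = leon_ipi_mask_one,
};

/* Hypothetical init hook: installed once during SMP bring-up; the generic
 * code in smp_32.c then dispatches through sparc32_ipi_ops->resched(cpu)
 * and friends instead of BTFIXUP_CALL(). */
void leon_ipi_ops_init(void)
{
	sparc32_ipi_ops = &leon_ipi_ops;
}

The point of the indirection is that the generic SMP code no longer needs BTFIXUP patching for IPIs; each platform installs its ops table once at boot and the common code calls through the pointer, as the hunks below show.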
@@ -40,6 +40,8 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
 
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
+const struct sparc32_ipi_ops *sparc32_ipi_ops;
+
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
  * places the current byte at the effective address into dest_reg and
@@ -124,7 +126,7 @@ void smp_send_reschedule(int cpu)
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
-	BTFIXUP_CALL(smp_ipi_resched)(cpu);
+	sparc32_ipi_ops->resched(cpu);
 }
 
 void smp_send_stop(void)
@@ -134,7 +136,7 @@ void smp_send_stop(void)
 void arch_send_call_function_single_ipi(int cpu)
 {
 	/* trigger one IPI single call on one CPU */
-	BTFIXUP_CALL(smp_ipi_single)(cpu);
+	sparc32_ipi_ops->single(cpu);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -143,7 +145,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 	/* trigger IPI mask call on each CPU */
 	for_each_cpu(cpu, mask)
-		BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
+		sparc32_ipi_ops->mask_one(cpu);
 }
 
 void smp_resched_interrupt(void)