// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif
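
/*
 * Record which logical CPUs are hardware siblings (VPEs/TCs sharing a
 * core) of @cpu in cpu_sibling_map, considering only CPUs that have
 * already been added to cpu_sibling_setup_map.
 */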
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}
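
/*
 * Record which logical CPUs share a physical package with @cpu in
 * cpu_core_map, considering only CPUs already present in
 * cpu_core_setup_map.
 */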
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}
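
/*
 * Send an IPI of the given type to every CPU in @mask. If a Cluster
 * Power Controller is present, also issue a power-up command to any
 * target core that has not yet reached coherence, so a powered-down
 * core will actually come up and see the interrupt.
 */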
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}
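
/*
 * Register @handler for the per-CPU IPI @virq. IPI setup is not
 * expected to fail, so any error from request_irq() is fatal.
 */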
static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;
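
/*
 * Reserve and wire up the "call function" and "reschedule" IPIs for the
 * CPUs in @mask, looking up the IPI IRQ domain via the device tree
 * first and falling back to any registered IPI domain.
 */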
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs so fail
	 * loudly if that is the case. Otherwise simply return, skipping IPI
	 * setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}
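
/*
 * Undo mips_smp_ipi_allocate() for the CPUs in @mask: free the per-CPU
 * IRQ handlers where applicable and release the reserved IPIs.
 */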
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}

static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}
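
/* Ask every other CPU to take itself offline and spin with IRQs disabled. */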
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}
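
/*
 * Bring up secondary CPU @cpu. The platform boot_secondary() hook kicks
 * the CPU; we then wait (with a timeout) for it to signal cpu_starting,
 * synchronise the cycle counters with it, and finally wait for it to
 * mark itself online via cpu_running before returning.
 */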
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}
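
/*
 * Flush all TLB entries on every CPU. With MMID support a single global
 * invalidate (ginvt) covers all CPUs; otherwise an IPI runs
 * local_flush_tlb_all() on each CPU.
 */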
void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	if (!mm)
		return;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
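
/*
 * Flush the TLB entries covering [start, end) of @vma's address space
 * on all CPUs that may hold them. With MMID support the range is
 * invalidated with globalized ginvt operations; otherwise other CPUs
 * are either sent an IPI or have their ASID for this mm invalidated.
 */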
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * ASID without it appearing to has_valid_asid() as if
			 * mm has been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
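
/*
 * Kernel mappings are shared by every CPU, so the flush is simply
 * broadcast to all of them.
 */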
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}
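
/*
 * Flush the TLB entry mapping @page in @vma on every CPU that may hold
 * it, using the same MMID / IPI / ASID-invalidation strategy as
 * flush_tlb_range().
 */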
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate ASID without it appearing to
			 * has_valid_asid() as if mm has been completely unused
			 * by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
	CSD_INIT(tick_broadcast_callee, NULL);
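
/*
 * Deliver a clock tick to the CPUs in @mask by firing each one's
 * pre-initialised per-CPU call_single_data asynchronously.
 */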
void tick_broadcast(const struct cpumask *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */