smp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of threads (siblings) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNCTION,
};

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNCTION] = "Function call interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
		seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
	}
}

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
};

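/* Read this CPU's pending IPI bits and write them back to acknowledge them */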
static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
	smp_mb();

	return action;
}

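/*
 * Raise one IPI on the target physical CPU for each bit set in @action:
 * every loop iteration posts a single vector (the bit index) via IOCSR_IPI_SEND.
 */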
static void ipi_write_action(int cpu, u32 action)
{
	unsigned int irq = 0;

	while ((irq = ffs(action))) {
		uint32_t val = IOCSR_IPI_SEND_BLOCKING;

		val |= (irq - 1);
		val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
		iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
		action &= ~BIT(irq - 1);
	}
}

void loongson_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

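/*
 * Common IPI handler: acknowledge everything that is pending in one read,
 * then run the scheduler and/or SMP function-call work that was requested.
 */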
irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
	unsigned int action;
	unsigned int cpu = smp_processor_id();

	action = ipi_read_clear(cpu_logical_map(cpu));

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	return IRQ_HANDLED;
}

void __init loongson_smp_setup(void)
{
	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
	pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

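/*
 * Mark every configured CPU present and zero its mailbox 0, the location a
 * parked secondary polls (see play_dead()) for a kernel entry address.
 */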
void __init loongson_prepare_cpus(unsigned int max_cpus)
{
	int i = 0;

	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
		set_cpu_present(i, true);
		csr_mail_send(0, __cpu_logical_map[i], 0);
	}

	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Setup the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long entry;

	pr_info("Booting CPU#%d...\n", cpu);

	entry = __pa_symbol((unsigned long)&smpboot_entry);
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

	csr_mail_send(entry, cpu_logical_map(cpu), 0);

	loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
			     ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;

	change_csr_ecfg(ECFG0_IM, imask);

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
	numa_add_cpu(cpu);
#endif
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	cpu_data[cpu].core =
		cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
	cpu_data[cpu].package =
		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
}

void loongson_smp_finish(void)
{
	local_irq_enable();
	iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
	pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

int loongson_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (io_master(cpu))
		return -EBUSY;

#ifdef CONFIG_NUMA
	numa_remove_cpu(cpu);
#endif
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
	clear_csr_ecfg(ECFG0_IM);
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}

void play_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	local_irq_enable();
	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
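	/* Idle until the boot CPU posts a new entry address in mailbox 0 */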
	do {
		__asm__ __volatile__("idle 0\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	unreachable();
}

#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
	return 0;
}

static void loongson_ipi_resume(void)
{
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
	.resume = loongson_ipi_resume,
	.suspend = loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
	register_syscore_ops(&loongson_ipi_syscore_ops);
	return 0;
}

core_initcall(ipi_pm_init);

#endif

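/* Record @cpu as a sibling of every already-registered CPU on the same core */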
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings <= 1)
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
	else {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	}
}

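/* Record @cpu in the core map of every already-registered CPU in its package */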
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	unsigned int cpu, node, rr_node;

	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	set_my_cpu_offset(per_cpu_offset(0));

	rr_node = first_node(node_online_map);
	for_each_possible_cpu(cpu) {
		node = early_cpu_to_node(cpu);

		/*
		 * The mapping between present cpus and nodes has been
		 * built during MADT and SRAT parsing.
		 *
		 * If possible cpus = present cpus here, early_cpu_to_node
		 * will return valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), for possible but
		 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
		 * and we just map them to online nodes in round-robin way.
		 * Once hotplugged, new correct mapping will be built for them.
		 */
		if (node != NUMA_NO_NODE)
			set_cpu_numa_node(cpu, node);
		else {
			set_cpu_numa_node(cpu, rr_node);
			rr_node = next_node_in(rr_node, node_online_map);
		}
	}
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

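/*
 * Bring one secondary CPU up: post its entry point, then wait first for the
 * "starting" handshake and then for it to mark itself online.
 */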
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	loongson_boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_probe();
	constant_clockevent_init();
	loongson_init_secondary();

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting */
	complete(&cpu_starting);

	/* The CPU is running, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up()
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in loongson_smp_finish(), enabling it too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	loongson_smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (true);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

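/*
 * Cross-CPU TLB shootdowns: each flush_tlb_*() helper below either broadcasts
 * an IPI that runs the matching local_flush_tlb_*() on the remote CPUs, or,
 * when the mm is only active on the current CPU, simply clears the other
 * CPUs' contexts (cpu_context = 0) so a fresh one is set up on next use there.
 */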
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_mm(mm);
	}

	preempt_enable();
}

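/* Arguments marshalled through on_each_cpu*() for the ranged/page IPI handlers */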
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);