cacheflush.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/of.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>
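
/* IPI callback: perform a local icache flush (fence.i) on the receiving hart. */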
static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}
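
/*
 * Flush this hart's instruction cache, then ask every other hart to do the
 * same: through a single SBI remote fence.i call when an SBI implementation
 * is available, otherwise by sending an IPI to each CPU.
 */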
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
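	/*
	 * If no other hart is currently running this mm, the deferred flushes
	 * scheduled above are sufficient, so the flush can be treated as local.
	 */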
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
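/*
 * Flush the icache for a page the first time one of its PTEs is installed;
 * PG_dcache_clean records that the flush has been done so that later mappings
 * of the same page can skip it.
 */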
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
#endif /* CONFIG_MMU */

unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
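
/*
 * Probe the Zicbom cache-block size from the "riscv,cbom-block-size" device
 * tree property of each CPU node, warning when two harts report different
 * values; the first value found is the one used.
 */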
void riscv_init_cbom_blocksize(void)
{
	struct device_node *node;
	unsigned long cbom_hartid;
	u32 val, probed_block_size;
	int ret;

	probed_block_size = 0;
	for_each_of_cpu_node(node) {
		unsigned long hartid;

		ret = riscv_of_processor_hartid(node, &hartid);
		if (ret)
			continue;

		/* set block-size for cbom extension if available */
		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
		if (ret)
			continue;

		if (!probed_block_size) {
			probed_block_size = val;
			cbom_hartid = hartid;
		} else {
			if (probed_block_size != val)
				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
					cbom_hartid, hartid);
		}
	}

	if (probed_block_size)
		riscv_cbom_block_size = probed_block_size;
}