cachev2.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/barrier.h>

/* for L1-cache */
#define INS_CACHE	(1 << 0)
#define DATA_CACHE	(1 << 1)
#define CACHE_INV	(1 << 4)
#define CACHE_CLR	(1 << 5)
#define CACHE_OMS	(1 << 6)
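
/*
 * The bits above are written to control register cr17 to trigger a
 * cache operation; with CACHE_OMS set, the operation is restricted to
 * the line whose address was loaded into cr22 first (see
 * cache_op_line() below), otherwise it hits the whole selected cache.
 * sync_is() is then used as a completion barrier. This description is
 * inferred from how the registers are used in this file.
 */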

/* Invalidate the whole L1 instruction cache on the local CPU. */
void local_icache_inv_all(void *priv)
{
	mtcr("cr17", INS_CACHE | CACHE_INV);
	sync_is();
}

#ifdef CONFIG_CPU_HAS_ICACHE_INS
/*
 * The CPU has a per-line "invalidate I-cache by virtual address"
 * instruction (icache.iva), so the range can be invalidated directly,
 * with no lock and no cross-CPU call.
 */
void icache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("icache.iva %0\n"::"r"(i):"memory");
	sync_is();
}
#else
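/*
 * Without a per-line invalidate instruction, a range is walked through
 * the cr22/cr17 register pair: cr22 holds the target address, CACHE_OMS
 * in cr17 applies the operation to that line only. The spinlock (taken
 * with interrupts off) keeps the two-register sequence from being
 * interleaved, and since the control registers are per-core, every CPU
 * has to run the loop itself -- hence the on_each_cpu() in
 * icache_inv_range() below.
 */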

struct cache_range {
	unsigned long start;
	unsigned long end;
};

static DEFINE_SPINLOCK(cache_lock);

/* Point cr22 at one cache line, then issue the operation via cr17. */
static inline void cache_op_line(unsigned long i, unsigned int val)
{
	mtcr("cr22", i);
	mtcr("cr17", val);
}

void local_icache_inv_range(void *priv)
{
	struct cache_range *param = priv;
	unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
	unsigned long flags;

	spin_lock_irqsave(&cache_lock, flags);

	for (; i < param->end; i += L1_CACHE_BYTES)
		cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);

	spin_unlock_irqrestore(&cache_lock, flags);

	sync_is();
}

void icache_inv_range(unsigned long start, unsigned long end)
{
	struct cache_range param = { start, end };

	/*
	 * on_each_cpu() must not be called with interrupts disabled,
	 * so fall back to invalidating on the local CPU only.
	 */
	if (irqs_disabled())
		local_icache_inv_range(&param);
	else
		on_each_cpu(local_icache_inv_range, &param, 1);
}
#endif
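
/*
 * D-cache maintenance by virtual address. Reading the mnemonic,
 * "dcache.cval1" is presumably "clean by virtual address, L1 only";
 * this breakdown is inferred from the C-SKY instruction naming rather
 * than stated anywhere in this file.
 */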

/* Write back (clean) a single D-cache line. */
inline void dcache_wb_line(unsigned long start)
{
	asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
	sync_is();
}

/* Write back (clean) every D-cache line that overlaps [start, end). */
void dcache_wb_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
	sync_is();
}

/*
 * Clean the D-cache and invalidate the I-cache for a range, e.g. after
 * writing instructions that are about to be executed.
 */
void cache_wbinv_range(unsigned long start, unsigned long end)
{
	dcache_wb_range(start, end);
	icache_inv_range(start, end);
}
EXPORT_SYMBOL(cache_wbinv_range);
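
/*
 * Helpers for DMA buffer maintenance. Judging by the mnemonics (again
 * an inference from the instruction names, not from this file):
 *
 *	dcache.cva  - clean (write back) by virtual address
 *	dcache.iva  - invalidate by virtual address
 *	dcache.civa - clean + invalidate by virtual address
 */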

void dma_wbinv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
	sync_is();
}

void dma_inv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.iva %0\n"::"r"(i):"memory");
	sync_is();
}

void dma_wb_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.cva %0\n"::"r"(i):"memory");
	sync_is();
}
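
/*
 * Usage sketch (illustrative, not part of this file): for a buffer the
 * CPU fills and a device then reads, clean it before starting the
 * transfer; for a buffer the device writes, invalidate it before the
 * CPU reads the result:
 *
 *	dma_wb_range(buf, buf + len);	// CPU -> device: flush writes
 *	... start DMA, device consumes buf ...
 *
 *	dma_inv_range(buf, buf + len);	// device -> CPU: drop stale lines
 *	... CPU reads data the device wrote ...
 */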