cachev1.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
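
/*
 * L1/L2 cache maintenance for C-SKY cache-v1 cores, driven through the
 * cr17/cr22/cr24 control registers.
 */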

#include <linux/spinlock.h>
#include <asm/cache.h>
#include <abi/reg_ops.h>

/* for L1-cache */
#define INS_CACHE	(1 << 0)
#define DATA_CACHE	(1 << 1)
#define CACHE_INV	(1 << 4)
#define CACHE_CLR	(1 << 5)
#define CACHE_OMS	(1 << 6)
#define CACHE_ITS	(1 << 7)
#define CACHE_LICF	(1 << 31)

/* for L2-cache */
#define CR22_LEVEL_SHIFT	(1)
#define CR22_SET_SHIFT		(7)
#define CR22_WAY_SHIFT		(30)
#define CR22_WAY_SHIFT_L2	(29)
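
/* Serializes the two-step cr22/cr17 line operations in cache_op_range(). */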
static DEFINE_SPINLOCK(cache_lock);
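
/* Point cr22 at the target line, then trigger the operation via cr17. */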
static inline void cache_op_line(unsigned long i, unsigned int val)
{
	mtcr("cr22", i);
	mtcr("cr17", val);
}
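
/*
 * Apply @value (plus CACHE_CLR) to the whole L1 cache via cr17; if @l2 is
 * set and the L2 cache is enabled (CCR2_L2E), repeat the operation on L2
 * via cr24.
 */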
#define CCR2_L2E	(1 << 3)
static void cache_op_all(unsigned int value, unsigned int l2)
{
	mtcr("cr17", value | CACHE_CLR);
	mb();

	if (l2 && (mfcr_ccr2() & CCR2_L2E)) {
		mtcr("cr24", value | CACHE_CLR);
		mb();
	}
}
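
/*
 * Per-line variant: walk [start, end) in L1_CACHE_BYTES steps.  Ranges of
 * a page or more, or outside the lowmem window, fall back to the cheaper
 * whole-cache operation.
 */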
static void cache_op_range(
	unsigned int start,
	unsigned int end,
	unsigned int value,
	unsigned int l2)
{
	unsigned long i, flags;
	unsigned int val = value | CACHE_CLR | CACHE_OMS;
	bool l2_sync;

	if (unlikely((end - start) >= PAGE_SIZE) ||
	    unlikely(start < PAGE_OFFSET) ||
	    unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) {
		cache_op_all(value, l2);
		return;
	}

	if ((mfcr_ccr2() & CCR2_L2E) && l2)
		l2_sync = 1;
	else
		l2_sync = 0;

	spin_lock_irqsave(&cache_lock, flags);

	i = start & ~(L1_CACHE_BYTES - 1);
	for (; i < end; i += L1_CACHE_BYTES) {
		cache_op_line(i, val);
		if (l2_sync) {
			mb();
			mtcr("cr24", val);
		}
	}
	spin_unlock_irqrestore(&cache_lock, flags);

	mb();
}
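
/*
 * Write back a single D-cache line.  idly4 is a C-SKY pipeline-delay
 * instruction issued ahead of the line operation.
 */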
void dcache_wb_line(unsigned long start)
{
	asm volatile("idly4\n":::"memory");
	cache_op_line(start, DATA_CACHE|CACHE_CLR);
	mb();
}
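
/*
 * Cache maintenance entry points: "inv" invalidates (CACHE_INV), "wb"
 * writes back (CACHE_CLR), "wbinv" does both.  These L1-only helpers pass
 * l2 = 0.
 */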
void icache_inv_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, INS_CACHE|CACHE_INV, 0);
}

void icache_inv_all(void)
{
	cache_op_all(INS_CACHE|CACHE_INV, 0);
}

void local_icache_inv_all(void *priv)
{
	cache_op_all(INS_CACHE|CACHE_INV, 0);
}

void dcache_wb_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
}

void dcache_wbinv_all(void)
{
	cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}

void cache_wbinv_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}
EXPORT_SYMBOL(cache_wbinv_range);

void cache_wbinv_all(void)
{
	cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
}
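
/*
 * DMA variants also maintain the L2 cache (l2 = 1).  Note that all three
 * currently perform the same clear + invalidate on the range.
 */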
void dma_wbinv_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}

void dma_inv_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}

void dma_wb_range(unsigned long start, unsigned long end)
{
	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
}