/* cache.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  4. *
  5. * Derived from MIPS:
  6. * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle ([email protected])
  7. * Copyright (C) 2007 MIPS Technologies, Inc.
  8. */
  9. #include <linux/cacheinfo.h>
  10. #include <linux/export.h>
  11. #include <linux/fs.h>
  12. #include <linux/highmem.h>
  13. #include <linux/kernel.h>
  14. #include <linux/linkage.h>
  15. #include <linux/mm.h>
  16. #include <linux/sched.h>
  17. #include <linux/syscalls.h>
  18. #include <asm/bootinfo.h>
  19. #include <asm/cacheflush.h>
  20. #include <asm/cpu.h>
  21. #include <asm/cpu-features.h>
  22. #include <asm/loongarch.h>
  23. #include <asm/numa.h>
  24. #include <asm/processor.h>
  25. #include <asm/setup.h>
  26. void cache_error_setup(void)
  27. {
  28. extern char __weak except_vec_cex;
  29. set_merr_handler(0x0, &except_vec_cex, 0x80);
  30. }
/*
 * LoongArch maintains ICache/DCache coherency by hardware,
 * we just need "ibar" to avoid instruction hazard here.
 */
void local_flush_icache_range(unsigned long start, unsigned long end)
{
	/*
	 * No per-line cache operations are required: IBAR 0 alone
	 * synchronizes the instruction fetch stream against prior
	 * stores, so the start/end range is intentionally unused.
	 */
	asm volatile ("\tibar 0\n"::);
}
EXPORT_SYMBOL(local_flush_icache_range);
  40. static void flush_cache_leaf(unsigned int leaf)
  41. {
  42. int i, j, nr_nodes;
  43. uint64_t addr = CSR_DMW0_BASE;
  44. struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;
  45. nr_nodes = cache_private(cdesc) ? 1 : loongson_sysconf.nr_nodes;
  46. do {
  47. for (i = 0; i < cdesc->sets; i++) {
  48. for (j = 0; j < cdesc->ways; j++) {
  49. flush_cache_line(leaf, addr);
  50. addr++;
  51. }
  52. addr -= cdesc->ways;
  53. addr += cdesc->linesz;
  54. }
  55. addr += (1ULL << NODE_ADDRSPACE_SHIFT);
  56. } while (--nr_nodes > 0);
  57. }
  58. asmlinkage __visible void __flush_cache_all(void)
  59. {
  60. int leaf;
  61. struct cache_desc *cdesc = current_cpu_data.cache_leaves;
  62. unsigned int cache_present = current_cpu_data.cache_leaves_present;
  63. leaf = cache_present - 1;
  64. if (cache_inclusive(cdesc + leaf)) {
  65. flush_cache_leaf(leaf);
  66. return;
  67. }
  68. for (leaf = 0; leaf < cache_present; leaf++)
  69. flush_cache_leaf(leaf);
  70. }
/* CPUCFG16 bits describing the L1 caches. */
#define L1IUPRE		(1 << 0)	/* L1 I/unified cache present */
#define L1IUUNIFY	(1 << 1)	/* L1 "I" cache is actually unified */
#define L1DPRE		(1 << 2)	/* L1 D-cache present */

/* CPUCFG16 bits for each higher level, after shifting config right. */
#define LXIUPRE		(1 << 0)	/* LX I/unified cache present */
#define LXIUUNIFY	(1 << 1)	/* LX "I" cache is actually unified */
#define LXIUPRIV	(1 << 2)	/* LX I/unified cache is private */
#define LXIUINCL	(1 << 3)	/* LX I/unified cache is inclusive */
#define LXDPRE		(1 << 4)	/* LX D-cache present */
#define LXDPRIV		(1 << 5)	/* LX D-cache is private */
#define LXDINCL		(1 << 6)	/* LX D-cache is inclusive */

/*
 * Fill in one cache_desc: geometry (ways/sets/line size) comes from
 * CPUCFG17+leaf, while private/inclusive attributes come from the cfg0
 * bits (L1 caches are unconditionally private).
 *
 * NOTE: this macro advances both "cdesc" and "leaf" as a side effect,
 * so successive invocations describe successive cache leaves.  All four
 * arguments are evaluated more than once — pass only plain lvalues.
 */
#define populate_cache_properties(cfg0, cdesc, level, leaf)	\
do {								\
	unsigned int cfg1;					\
								\
	cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf);		\
	if (level == 1)	{					\
		cdesc->flags |= CACHE_PRIVATE;			\
	} else {						\
		if (cfg0 & LXIUPRIV)				\
			cdesc->flags |= CACHE_PRIVATE;		\
		if (cfg0 & LXIUINCL)				\
			cdesc->flags |= CACHE_INCLUSIVE;	\
	}							\
	cdesc->level = level;					\
	cdesc->flags |= CACHE_PRESENT;				\
	cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1;	\
	cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS);	\
	cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE);	\
	cdesc++; leaf++;					\
} while (0)
/*
 * Probe the cache hierarchy from CPUCFG16/CPUCFG17+ and record one
 * cache_desc per leaf in current_cpu_data.cache_leaves.
 *
 * populate_cache_properties() advances both "cdesc" and "leaf" on each
 * call, so the leaves are filled in probe order: L1 I/unified, L1 D,
 * then I/unified and D for each higher level.
 */
void cpu_cache_init(void)
{
	unsigned int leaf = 0, level = 1;
	unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
	struct cache_desc *cdesc = current_cpu_data.cache_leaves;

	if (config & L1IUPRE) {
		if (config & L1IUUNIFY)
			cdesc->type = CACHE_TYPE_UNIFIED;
		else
			cdesc->type = CACHE_TYPE_INST;
		populate_cache_properties(config, cdesc, level, leaf);
	}

	if (config & L1DPRE) {
		cdesc->type = CACHE_TYPE_DATA;
		populate_cache_properties(config, cdesc, level, leaf);
	}

	/* Skip the 3 L1 bits; each higher level occupies 7 bits. */
	config = config >> 3;
	for (level = 2; level <= CACHE_LEVEL_MAX; level++) {
		if (!config)
			break;

		if (config & LXIUPRE) {
			if (config & LXIUUNIFY)
				cdesc->type = CACHE_TYPE_UNIFIED;
			else
				cdesc->type = CACHE_TYPE_INST;
			populate_cache_properties(config, cdesc, level, leaf);
		}

		if (config & LXDPRE) {
			cdesc->type = CACHE_TYPE_DATA;
			populate_cache_properties(config, cdesc, level, leaf);
		}

		config = config >> 7;
	}

	BUG_ON(leaf > CACHE_LEAVES_MAX);

	current_cpu_data.cache_leaves_present = leaf;
	current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
	shm_align_mask = PAGE_SIZE - 1;
}
/*
 * Page protection bits for each combination of VM_{READ,WRITE,EXEC,SHARED}.
 * Consumed by the generic vm_get_page_prot() that DECLARE_VM_GET_PAGE_PROT
 * expands to below.  Note that private (non-VM_SHARED) writable mappings
 * deliberately omit _PAGE_WRITE so the first write faults for CoW, and
 * VM_NONE/VM_SHARED-only entries use _PAGE_PROTNONE with no valid bit.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_READ]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_EXEC]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_SHARED | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_SHARED | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_EXEC]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE)
};
DECLARE_VM_GET_PAGE_PROT