cache-tauros2.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * References:
 * - PJ1 CPU Core Datasheet,
 *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
 * - PJ4 CPU Core Datasheet,
 *   Document ID MV-S105190-00, Rev 0.7, March 14 2008.
 */
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-tauros2.h>

/* CP15 PJ4 Control configuration register */
#define CCR_L2C_PREFETCH_DISABLE	BIT(24)
#define CCR_L2C_ECC_ENABLE		BIT(23)
#define CCR_L2C_WAY7_4_DISABLE		BIT(21)
#define CCR_L2C_BURST8_ENABLE		BIT(20)
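/*
 * Editor's note: these bits are applied to the value read via
 * read_extra_features() in enable_extra_feature() below; only the
 * prefetch and burst8 bits are actually toggled by this file.
 */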
/*
 * When Tauros2 is used on a CPU that supports the v7 hierarchical
 * cache operations, the cache handling code in proc-v7.S takes care
 * of everything, including handling DMA coherency.
 *
 * So, we only need to register outer cache operations here if we're
 * being used on a pre-v7 CPU, and we only need to build support for
 * outer cache operations into the kernel image if the kernel has been
 * configured to support a pre-v7 CPU.
 */
#ifdef CONFIG_CPU_32v5
/*
 * Low-level cache maintenance operations.
 */
static inline void tauros2_clean_pa(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}

static inline void tauros2_clean_inv_pa(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}

static inline void tauros2_inv_pa(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}

/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive.
 */
#define CACHE_LINE_SIZE		32

static void tauros2_inv_range(unsigned long start, unsigned long end)
{
        /*
         * Clean and invalidate partial first cache line.
         */
        if (start & (CACHE_LINE_SIZE - 1)) {
                tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
                start = (start | (CACHE_LINE_SIZE - 1)) + 1;
        }

        /*
         * Clean and invalidate partial last cache line.
         */
        if (end & (CACHE_LINE_SIZE - 1)) {
                tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
                end &= ~(CACHE_LINE_SIZE - 1);
        }

        /*
         * Invalidate all full cache lines between 'start' and 'end'.
         */
        while (start < end) {
                tauros2_inv_pa(start);
                start += CACHE_LINE_SIZE;
        }

        dsb();
}
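/*
 * Illustrative example (hypothetical addresses, not from the datasheets):
 * tauros2_inv_range(0x1004, 0x2010) first cleans+invalidates the partial
 * lines at 0x1000 and 0x2000, leaving start = 0x1020 and end = 0x2000,
 * then invalidates the full lines 0x1020, 0x1040, ..., 0x1fe0 before the
 * final dsb().
 */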
static void tauros2_clean_range(unsigned long start, unsigned long end)
{
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                tauros2_clean_pa(start);
                start += CACHE_LINE_SIZE;
        }

        dsb();
}

static void tauros2_flush_range(unsigned long start, unsigned long end)
{
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                tauros2_clean_inv_pa(start);
                start += CACHE_LINE_SIZE;
        }

        dsb();
}
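/*
 * tauros2_disable() cleans the whole L2 and then clears the L2 enable bit
 * (bit 26) in the CP15 Control Register; tauros2_resume() invalidates the
 * L2 and sets that bit again. The pair is registered below as the outer
 * cache disable/resume hooks.
 */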
static void tauros2_disable(void)
{
        __asm__ __volatile__ (
        "mcr p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t"
        "mrc p15, 0, %0, c1, c0, 0\n\t"
        "bic %0, %0, #(1 << 26)\n\t"
        "mcr p15, 0, %0, c1, c0, 0 @Disable L2 Cache\n\t"
        : : "r" (0x0));
}

static void tauros2_resume(void)
{
        __asm__ __volatile__ (
        "mcr p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t"
        "mrc p15, 0, %0, c1, c0, 0\n\t"
        "orr %0, %0, #(1 << 26)\n\t"
        "mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
        : : "r" (0x0));
}
#endif
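/*
 * Accessors for the CP15 registers used below: the Marvell CPU Extra
 * Features register (c15, c1, 0), the ID register scheme check on
 * processor_id, ID_MMFR3 and the Auxiliary Control Register (ACTLR).
 */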
static inline u32 __init read_extra_features(void)
{
        u32 u;

        __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

        return u;
}

static inline void __init write_extra_features(u32 u)
{
        __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}

static inline int __init cpuid_scheme(void)
{
        return !!((processor_id & 0x000f0000) == 0x000f0000);
}

static inline u32 __init read_mmfr3(void)
{
        u32 mmfr3;

        __asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));

        return mmfr3;
}

static inline u32 __init read_actlr(void)
{
        u32 actlr;

        __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

        return actlr;
}

static inline void __init write_actlr(u32 actlr)
{
        __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}

static void enable_extra_feature(unsigned int features)
{
        u32 u;

        u = read_extra_features();

        if (features & CACHE_TAUROS2_PREFETCH_ON)
                u &= ~CCR_L2C_PREFETCH_DISABLE;
        else
                u |= CCR_L2C_PREFETCH_DISABLE;
        pr_info("Tauros2: %s L2 prefetch.\n",
                (features & CACHE_TAUROS2_PREFETCH_ON)
                ? "Enabling" : "Disabling");

        if (features & CACHE_TAUROS2_LINEFILL_BURST8)
                u |= CCR_L2C_BURST8_ENABLE;
        else
                u &= ~CCR_L2C_BURST8_ENABLE;
        pr_info("Tauros2: %s burst8 line fill.\n",
                (features & CACHE_TAUROS2_LINEFILL_BURST8)
                ? "Enabling" : "Disabling");

        write_extra_features(u);
}
static void __init tauros2_internal_init(unsigned int features)
{
        char *mode = NULL;

        enable_extra_feature(features);

#ifdef CONFIG_CPU_32v5
        if ((processor_id & 0xff0f0000) == 0x56050000) {
                u32 feat;

                /*
                 * v5 CPUs with Tauros2 have the L2 cache enable bit
                 * located in the CPU Extra Features register.
                 */
                feat = read_extra_features();
                if (!(feat & 0x00400000)) {
                        pr_info("Tauros2: Enabling L2 cache.\n");
                        write_extra_features(feat | 0x00400000);
                }

                mode = "ARMv5";
                outer_cache.inv_range = tauros2_inv_range;
                outer_cache.clean_range = tauros2_clean_range;
                outer_cache.flush_range = tauros2_flush_range;
                outer_cache.disable = tauros2_disable;
                outer_cache.resume = tauros2_resume;
        }
#endif

#ifdef CONFIG_CPU_32v7
        /*
         * Check whether this CPU has support for the v7 hierarchical
         * cache ops.  (PJ4 is in its v7 personality mode if the MMFR3
         * register indicates support for the v7 hierarchical cache
         * ops.)
         *
         * (Although strictly speaking there may exist CPUs that
         * implement the v7 cache ops but are only ARMv6 CPUs (due to
         * not complying with all of the other ARMv7 requirements),
         * there are no real-life examples of Tauros2 being used on
         * such CPUs as of yet.)
         */
        if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
                u32 actlr;

                /*
                 * When Tauros2 is used in an ARMv7 system, the L2
                 * enable bit is located in the Auxiliary System Control
                 * Register (which is the only register allowed by the
                 * ARMv7 spec to contain fine-grained cache control bits).
                 */
                actlr = read_actlr();
                if (!(actlr & 0x00000002)) {
                        pr_info("Tauros2: Enabling L2 cache.\n");
                        write_actlr(actlr | 0x00000002);
                }

                mode = "ARMv7";
        }
#endif

        if (mode == NULL) {
                pr_crit("Tauros2: Unable to detect CPU mode.\n");
                return;
        }

        pr_info("Tauros2: L2 cache support initialised in %s mode.\n", mode);
}
#ifdef CONFIG_OF
static const struct of_device_id tauros2_ids[] __initconst = {
        { .compatible = "marvell,tauros2-cache"},
        {}
};
#endif
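/*
 * A matching device tree node looks roughly like the following sketch
 * (node name and feature value are illustrative; the cell carries a
 * bitmask of the CACHE_TAUROS2_* flags from
 * <asm/hardware/cache-tauros2.h>):
 *
 *	L2: l2-cache {
 *		compatible = "marvell,tauros2-cache";
 *		marvell,tauros2-cache-features = <0x3>;
 *	};
 */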
void __init tauros2_init(unsigned int features)
{
#ifdef CONFIG_OF
        struct device_node *node;
        int ret;
        unsigned int f;

        node = of_find_matching_node(NULL, tauros2_ids);
        if (!node) {
                pr_info("Tauros2: no marvell,tauros2-cache node found\n");
        } else {
                ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
                if (ret) {
                        pr_info("Tauros2: no marvell,tauros2-cache-features property, disabling extra features\n");
                        features = 0;
                } else
                        features = f;
        }
#endif
        tauros2_internal_init(features);
}
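/*
 * Illustrative usage (not part of this file): a non-DT board setup could
 * request prefetch and burst8 line fills with
 *
 *	tauros2_init(CACHE_TAUROS2_PREFETCH_ON | CACHE_TAUROS2_LINEFILL_BURST8);
 *
 * while a DT-based platform can call tauros2_init(0) and let the
 * "marvell,tauros2-cache-features" property supply the feature mask.
 */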