/* cache-b15-rac.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Broadcom Brahma-B15 CPU read-ahead cache management functions
  4. *
  5. * Copyright (C) 2015-2016 Broadcom
  6. */
  7. #include <linux/err.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/io.h>
  10. #include <linux/bitops.h>
  11. #include <linux/of_address.h>
  12. #include <linux/notifier.h>
  13. #include <linux/cpu.h>
  14. #include <linux/syscore_ops.h>
  15. #include <linux/reboot.h>
  16. #include <asm/cacheflush.h>
  17. #include <asm/hardware/cache-b15-rac.h>
  18. extern void v7_flush_kern_cache_all(void);
/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
#define RAC_CONFIG0_REG			(0x78)
#define  RACENPREF_MASK			(0x3)
#define  RACPREFINST_SHIFT		(0)
#define  RACENINST_SHIFT		(2)
#define  RACPREFDATA_SHIFT		(4)
#define  RACENDATA_SHIFT		(6)
#define  RAC_CPU_SHIFT			(8)
#define  RACCFG_MASK			(0xff)
#define RAC_CONFIG1_REG			(0x7c)
/* Brahma-B15 is a quad-core only design */
#define B15_RAC_FLUSH_REG		(0x80)
/* Brahma-B53 is an octo-core design */
#define B53_RAC_FLUSH_REG		(0x84)
#define  FLUSH_RAC			(1 << 0)

/* Bitmask to enable instruction and data prefetching with a 256-bytes stride */
#define RAC_DATA_INST_EN_MASK		(1 << RACPREFINST_SHIFT | \
					 RACENPREF_MASK << RACENINST_SHIFT | \
					 1 << RACPREFDATA_SHIFT | \
					 RACENPREF_MASK << RACENDATA_SHIFT)

/* Bit numbers within b15_rac_flags (used with test_bit/set_bit) */
#define RAC_ENABLED			0
/* Special state where we want to bypass the spinlock and call directly
 * into the v7 cache maintenance operations during suspend/resume
 */
#define RAC_SUSPENDED			1

/* ioremapped HIF_CPU_BIUCTRL base, set once in b15_rac_init() */
static void __iomem *b15_rac_base;
/* Serializes RAC configuration changes against cache flush wrappers */
static DEFINE_SPINLOCK(rac_lock);

/* Saved RAC_CONFIG0 contents across hotplug/suspend disable cycles */
static u32 rac_config0_reg;
/* B15_RAC_FLUSH_REG or B53_RAC_FLUSH_REG, selected by CPU compatible */
static u32 rac_flush_offset;

/* Initialization flag to avoid checking for b15_rac_base, and to prevent
 * multi-platform kernels from crashing here as well.
 */
static unsigned long b15_rac_flags;
  52. static inline u32 __b15_rac_disable(void)
  53. {
  54. u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
  55. __raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
  56. dmb();
  57. return val;
  58. }
  59. static inline void __b15_rac_flush(void)
  60. {
  61. u32 reg;
  62. __raw_writel(FLUSH_RAC, b15_rac_base + rac_flush_offset);
  63. do {
  64. /* This dmb() is required to force the Bus Interface Unit
  65. * to clean outstanding writes, and forces an idle cycle
  66. * to be inserted.
  67. */
  68. dmb();
  69. reg = __raw_readl(b15_rac_base + rac_flush_offset);
  70. } while (reg & FLUSH_RAC);
  71. }
  72. static inline u32 b15_rac_disable_and_flush(void)
  73. {
  74. u32 reg;
  75. reg = __b15_rac_disable();
  76. __b15_rac_flush();
  77. return reg;
  78. }
/* Re-enable the read-ahead cache by restoring a previously saved
 * RAC_CONFIG0 value (as returned by __b15_rac_disable()).
 */
static inline void __b15_rac_enable(u32 val)
{
	__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
	/* dsb() is required here to be consistent with __flush_icache_all() */
	dsb();
}
/* Builds a RAC-aware wrapper b15_flush_##name() around the corresponding
 * v7_flush_##name() cache maintenance operation.  While the RAC is enabled
 * it is disabled and flushed around the v7 flush, under rac_lock; when
 * RAC_SUSPENDED is set the wrapper bypasses the spinlock and calls the v7
 * operation directly.  "bar" is an optional trailing barrier statement
 * (use "nobarrier" for none).
 */
#define BUILD_RAC_CACHE_OP(name, bar)				\
void b15_flush_##name(void)					\
{								\
	unsigned int do_flush;					\
	u32 val = 0;						\
								\
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {		\
		v7_flush_##name();				\
		bar;						\
		return;						\
	}							\
								\
	spin_lock(&rac_lock);					\
	do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);	\
	if (do_flush)						\
		val = b15_rac_disable_and_flush();		\
	v7_flush_##name();					\
	if (!do_flush)						\
		bar;						\
	else							\
		__b15_rac_enable(val);				\
	spin_unlock(&rac_lock);					\
}

/* Expands to nothing: placeholder "bar" argument when no barrier is needed */
#define nobarrier

/* The readahead cache present in the Brahma-B15 CPU is a special piece of
 * hardware after the integrated L2 cache of the B15 CPU complex whose purpose
 * is to prefetch instruction and/or data with a line size of either 64 bytes
 * or 256 bytes. The rationale is that the data-bus of the CPU interface is
 * optimized for 256-bytes transactions, and enabling the readahead cache
 * provides a significant performance boost we want it enabled (typically
 * twice the performance for a memcpy benchmark application).
 *
 * The readahead cache is transparent for Modified Virtual Addresses
 * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
 * DCCIMVAC.
 *
 * It is however not transparent for the following cache maintenance
 * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU which is precisely
 * what we are patching here with our BUILD_RAC_CACHE_OP here.
 */
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
  126. static void b15_rac_enable(void)
  127. {
  128. unsigned int cpu;
  129. u32 enable = 0;
  130. for_each_possible_cpu(cpu)
  131. enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
  132. b15_rac_disable_and_flush();
  133. __b15_rac_enable(enable);
  134. }
  135. static int b15_rac_reboot_notifier(struct notifier_block *nb,
  136. unsigned long action,
  137. void *data)
  138. {
  139. /* During kexec, we are not yet migrated on the boot CPU, so we need to
  140. * make sure we are SMP safe here. Once the RAC is disabled, flag it as
  141. * suspended such that the hotplug notifier returns early.
  142. */
  143. if (action == SYS_RESTART) {
  144. spin_lock(&rac_lock);
  145. b15_rac_disable_and_flush();
  146. clear_bit(RAC_ENABLED, &b15_rac_flags);
  147. set_bit(RAC_SUSPENDED, &b15_rac_flags);
  148. spin_unlock(&rac_lock);
  149. }
  150. return NOTIFY_DONE;
  151. }
  152. static struct notifier_block b15_rac_reboot_nb = {
  153. .notifier_call = b15_rac_reboot_notifier,
  154. };
/* The CPU hotplug case is the most interesting one, we basically need to make
 * sure that the RAC is disabled for the entire system prior to having a CPU
 * die, in particular prior to this dying CPU having exited the coherency
 * domain.
 *
 * Once this CPU is marked dead, we can safely re-enable the RAC for the
 * remaining CPUs in the system which are still online.
 *
 * Offlining a CPU is the problematic case, onlining a CPU is not much of an
 * issue since the CPU and its cache-level hierarchy will start filling with
 * the RAC disabled, so L1 and L2 only.
 *
 * In this function, we should NOT have to verify any unsafe setting/condition
 * b15_rac_base:
 *
 *   It is protected by the RAC_ENABLED flag which is cleared by default, and
 *   being cleared when initial procedure is done. b15_rac_base had been set at
 *   that time.
 *
 * RAC_ENABLED:
 *   There is a small timing window, in b15_rac_init(), between
 *     cpuhp_setup_state_*()
 *     ...
 *     set RAC_ENABLED
 *   However, there is no hotplug activity based on the Linux booting procedure.
 *
 * Since we have to disable RAC for all cores, we keep RAC on as long as
 * possible (disable it as late as possible) to gain the cache benefit.
 *
 * Thus, dying/dead states are chosen here.
 *
 * We are choosing not to disable the RAC on a per-CPU basis, here, if we did
 * we would want to consider disabling it as early as possible to benefit the
 * other active CPUs.
 */
/* Running on the dying CPU.  Disables the RAC system-wide (saving its
 * configuration in rac_config0_reg) before this CPU leaves the coherency
 * domain; b15_rac_dead_cpu() re-enables it afterwards.
 */
static int b15_rac_dying_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Indicate that we are starting a hotplug procedure */
	__clear_bit(RAC_ENABLED, &b15_rac_flags);

	/* Disable the readahead cache and save its value to a global */
	rac_config0_reg = b15_rac_disable_and_flush();

	spin_unlock(&rac_lock);

	return 0;
}
/* Running on a non-dying CPU, once the dying CPU is marked dead.
 * Restores the RAC configuration saved by b15_rac_dying_cpu().
 */
static int b15_rac_dead_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* And enable it */
	__b15_rac_enable(rac_config0_reg);
	__set_bit(RAC_ENABLED, &b15_rac_flags);

	spin_unlock(&rac_lock);

	return 0;
}
/* Syscore suspend callback: park the RAC before entering S3.
 * Always returns 0 (syscore suspend contract).
 */
static int b15_rac_suspend(void)
{
	/* Suspend the read-ahead cache operations, forcing our cache
	 * implementation to fallback to the regular ARMv7 calls.
	 *
	 * We are guaranteed to be running on the boot CPU at this point and
	 * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
	 * here.
	 */
	rac_config0_reg = b15_rac_disable_and_flush();
	set_bit(RAC_SUSPENDED, &b15_rac_flags);

	return 0;
}
/* Syscore resume callback: restore the configuration saved by
 * b15_rac_suspend() and leave the bypass state.
 */
static void b15_rac_resume(void)
{
	/* Coming out of a S3 suspend/resume cycle, the read-ahead cache
	 * register RAC_CONFIG0_REG will be restored to its default value, make
	 * sure we re-enable it and set the enable flag, we are also guaranteed
	 * to run on the boot CPU, so not racy again.
	 */
	__b15_rac_enable(rac_config0_reg);
	clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}
/* Hooks the RAC into the system-wide suspend/resume path (PM_SLEEP only) */
static struct syscore_ops b15_rac_syscore_ops = {
	.suspend	= b15_rac_suspend,
	.resume		= b15_rac_resume,
};
  248. static int __init b15_rac_init(void)
  249. {
  250. struct device_node *dn, *cpu_dn;
  251. int ret = 0, cpu;
  252. u32 reg, en_mask = 0;
  253. dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
  254. if (!dn)
  255. return -ENODEV;
  256. if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
  257. goto out;
  258. b15_rac_base = of_iomap(dn, 0);
  259. if (!b15_rac_base) {
  260. pr_err("failed to remap BIU control base\n");
  261. ret = -ENOMEM;
  262. goto out;
  263. }
  264. cpu_dn = of_get_cpu_node(0, NULL);
  265. if (!cpu_dn) {
  266. ret = -ENODEV;
  267. goto out;
  268. }
  269. if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
  270. rac_flush_offset = B15_RAC_FLUSH_REG;
  271. else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
  272. rac_flush_offset = B53_RAC_FLUSH_REG;
  273. else {
  274. pr_err("Unsupported CPU\n");
  275. of_node_put(cpu_dn);
  276. ret = -EINVAL;
  277. goto out;
  278. }
  279. of_node_put(cpu_dn);
  280. ret = register_reboot_notifier(&b15_rac_reboot_nb);
  281. if (ret) {
  282. pr_err("failed to register reboot notifier\n");
  283. iounmap(b15_rac_base);
  284. goto out;
  285. }
  286. if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
  287. ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
  288. "arm/cache-b15-rac:dead",
  289. NULL, b15_rac_dead_cpu);
  290. if (ret)
  291. goto out_unmap;
  292. ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
  293. "arm/cache-b15-rac:dying",
  294. NULL, b15_rac_dying_cpu);
  295. if (ret)
  296. goto out_cpu_dead;
  297. }
  298. if (IS_ENABLED(CONFIG_PM_SLEEP))
  299. register_syscore_ops(&b15_rac_syscore_ops);
  300. spin_lock(&rac_lock);
  301. reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
  302. for_each_possible_cpu(cpu)
  303. en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
  304. WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");
  305. b15_rac_enable();
  306. set_bit(RAC_ENABLED, &b15_rac_flags);
  307. spin_unlock(&rac_lock);
  308. pr_info("%pOF: Broadcom Brahma-B15 readahead cache\n", dn);
  309. goto out;
  310. out_cpu_dead:
  311. cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
  312. out_unmap:
  313. unregister_reboot_notifier(&b15_rac_reboot_nb);
  314. iounmap(b15_rac_base);
  315. out:
  316. of_node_put(dn);
  317. return ret;
  318. }
  319. arch_initcall(b15_rac_init);