// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <[email protected]>
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC                   0x0     /* Control Register */
#define    UNIPHIER_SSCC_BST            BIT(20) /* UCWG burst read */
#define    UNIPHIER_SSCC_ACT            BIT(19) /* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG            BIT(18) /* WT gathering on */
#define    UNIPHIER_SSCC_PRD            BIT(17) /* enable pre-fetch */
#define    UNIPHIER_SSCC_ON             BIT(0)  /* enable cache */
#define UNIPHIER_SSCLPDAWCR             0x30    /* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR             0x34    /* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID                  0x0     /* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE                 0x244   /* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV       0x0     /* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN     0x1     /* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH     0x2     /* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC      0x8     /* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
#define UNIPHIER_SSCOQM                 0x248   /* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK       (0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE      (0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL        (0x1 << 17)
#define    UNIPHIER_SSCOQM_CE           BIT(15) /* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV       0x0     /* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN     0x1     /* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH     0x2     /* flush */
#define UNIPHIER_SSCOQAD                0x24c   /* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ                0x250   /* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF             0x25c   /* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE       BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE       BIT(0)
#define UNIPHIER_SSCOLPQS               0x260   /* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF         BIT(2)
#define    UNIPHIER_SSCOLPQS_EST        BIT(1)
#define    UNIPHIER_SSCOLPQS_QST        BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base address of active way control registers
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
        void __iomem *ctrl_base;
        void __iomem *rev_base;
        void __iomem *op_base;
        void __iomem *way_ctrl_base;

        u32 way_mask;
        u32 nsets;
        u32 line_size;
        u32 range_op_max_size;
        struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
        /* This sequence need not be atomic.  Do not disable IRQ. */
        writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
                       data->op_base + UNIPHIER_SSCOPE);
        /* need a read back to confirm */
        readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
                                          unsigned long start,
                                          unsigned long size,
                                          u32 operation)
{
        unsigned long flags;

        /*
         * No spin lock is necessary here because:
         *
         * [1] This outer cache controller is able to accept maintenance
         * operations from multiple CPUs at a time in an SMP system; if a
         * maintenance operation is under way and another operation is issued,
         * the new one is stored in the queue.  The controller performs one
         * operation after another.  If the queue is full, the status register,
         * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
         * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
         * different instances for each CPU, i.e. each CPU can track the status
         * of the maintenance operations triggered by itself.
         *
         * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
         * SSCOQWN}, are shared between multiple CPUs, but the hardware still
         * guarantees that the registration sequence is atomic; the write
         * accesses to them are arbitrated by the hardware.  The first accessor
         * to the register, UNIPHIER_SSCOQM, holds the access right, and it is
         * released by reading the status register, UNIPHIER_SSCOPPQSEF.  While
         * one CPU is holding the access right, other CPUs fail to register
         * operations.  One CPU should not hold the access right for a long
         * time, so local IRQs should be disabled during the following sequence.
         */
        local_irq_save(flags);

        /* clear the complete notification flag */
        writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

        do {
                /* set cache operation */
                writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
                               data->op_base + UNIPHIER_SSCOQM);

                /* set address range if needed */
                if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
                        writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
                        writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
                }
        } while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
                          (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

        /* wait until the operation is completed */
        while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
                      UNIPHIER_SSCOLPQS_EF))
                cpu_relax();

        local_irq_restore(flags);
}
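
/*
 * Summary of the queue handshake implemented above (purely descriptive,
 * derived from the code itself):
 *   1. clear the end flag in SSCOLPQS
 *   2. write the command, plus the CE "notify completion" bit, to SSCOQM
 *   3. for a range operation, write start/size to SSCOQAD/SSCOQSZ
 *   4. retry from step 2 if SSCOPPQSEF reports FE or OE, i.e. the
 *      registration did not take effect
 *   5. spin until SSCOLPQS reads back EF, meaning the operation completed
 */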

static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
                                       u32 operation)
{
        __uniphier_cache_maint_common(data, 0, 0,
                                      UNIPHIER_SSCOQM_S_ALL | operation);

        __uniphier_cache_sync(data);
}

static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
                                         unsigned long start, unsigned long end,
                                         u32 operation)
{
        unsigned long size;

        /*
         * If the start address is not aligned,
         * perform a cache operation for the first cache-line
         */
        start = start & ~(data->line_size - 1);

        size = end - start;

        if (unlikely(size >= (unsigned long)(-data->line_size))) {
                /*
                 * The range covers (almost) the whole address space;
                 * operate on the entire cache instead.
                 */
                __uniphier_cache_maint_all(data, operation);
                return;
        }

        /*
         * If the end address is not aligned,
         * perform a cache operation for the last cache-line
         */
        size = ALIGN(size, data->line_size);

        while (size) {
                unsigned long chunk_size = min_t(unsigned long, size,
                                                 data->range_op_max_size);

                __uniphier_cache_maint_common(data, start, chunk_size,
                                              UNIPHIER_SSCOQM_S_RANGE | operation);

                start += chunk_size;
                size -= chunk_size;
        }

        __uniphier_cache_sync(data);
}
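
/*
 * Worked example of the chunking above (all values are illustrative
 * assumptions: a 128-byte cache line from the cache-line-size DT property and
 * the PH-sLD8-era 1 << 22 range limit).  The effective range_op_max_size is
 * 0x400000 - 0x80 = 0x3fff80, so a flush of [0x80000040, 0x80500000) is
 * aligned to [0x80000000, 0x80500000) and issued as two queued operations,
 *   start 0x80000000, size 0x3fff80
 *   start 0x803fff80, size 0x100080
 * followed by a single sync.
 */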

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
        u32 val = 0;

        if (on)
                val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

        writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_active_ways(
                                        struct uniphier_cache_data *data)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
                                       u32 operation)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
        uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
        uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
        struct uniphier_cache_data *data;

        list_for_each_entry_reverse(data, &uniphier_cache_list, list)
                __uniphier_cache_enable(data, false);

        uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
        struct uniphier_cache_data *data;

        uniphier_cache_inv_all();

        list_for_each_entry(data, &uniphier_cache_list, list) {
                __uniphier_cache_enable(data, true);
                __uniphier_cache_set_active_ways(data);
        }
}

static void uniphier_cache_sync(void)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
        { .compatible = "socionext,uniphier-system-cache" },
        { /* sentinel */ }
};
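
/*
 * Sketch of a device tree node that __uniphier_cache_init() below can parse.
 * All values are illustrative assumptions, not taken from a real board:
 *
 *      l2: cache-controller@500c0000 {
 *              compatible = "socionext,uniphier-system-cache";
 *              reg = <0x500c0000 0x2000>,      // control registers
 *                    <0x503c0100 0x4>,         // revision register
 *                    <0x506c0000 0x400>;       // operation registers
 *              cache-unified;
 *              cache-size = <0x100000>;        // 1 MiB
 *              cache-sets = <512>;
 *              cache-line-size = <128>;
 *              cache-level = <2>;
 *              next-level-cache = <&l3>;       // optional outer level
 *      };
 *
 * The init code maps the three reg regions, derives way_mask from
 * cache-size / (cache-sets * cache-line-size) (16 ways here, so GENMASK(15, 0)),
 * and follows next-level-cache to probe further levels.
 */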

static int __init __uniphier_cache_init(struct device_node *np,
                                        unsigned int *cache_level)
{
        struct uniphier_cache_data *data;
        u32 level, cache_size;
        struct device_node *next_np;
        int ret = 0;

        if (!of_match_node(uniphier_cache_match, np)) {
                pr_err("L%d: not compatible with uniphier cache\n",
                       *cache_level);
                return -EINVAL;
        }

        if (of_property_read_u32(np, "cache-level", &level)) {
                pr_err("L%d: cache-level is not specified\n", *cache_level);
                return -EINVAL;
        }

        if (level != *cache_level) {
                pr_err("L%d: cache-level is unexpected value %d\n",
                       *cache_level, level);
                return -EINVAL;
        }

        if (!of_property_read_bool(np, "cache-unified")) {
                pr_err("L%d: cache-unified is not specified\n", *cache_level);
                return -EINVAL;
        }

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
            !is_power_of_2(data->line_size)) {
                pr_err("L%d: cache-line-size is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

        if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
            !is_power_of_2(data->nsets)) {
                pr_err("L%d: cache-sets is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

        if (of_property_read_u32(np, "cache-size", &cache_size) ||
            cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
                pr_err("L%d: cache-size is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

        data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
                                 0);

        data->ctrl_base = of_iomap(np, 0);
        if (!data->ctrl_base) {
                pr_err("L%d: failed to map control register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

        data->rev_base = of_iomap(np, 1);
        if (!data->rev_base) {
                pr_err("L%d: failed to map revision register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

        data->op_base = of_iomap(np, 2);
        if (!data->op_base) {
                pr_err("L%d: failed to map operation register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

        data->way_ctrl_base = data->ctrl_base + 0xc00;

        if (*cache_level == 2) {
                u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
                /*
                 * The size of range operation is limited to (1 << 22) or less
                 * for PH-sLD8 or older SoCs.
                 */
                if (revision <= 0x16)
                        data->range_op_max_size = (u32)1 << 22;

                /*
                 * Unfortunately, the offset address of the active way control
                 * base varies from SoC to SoC.
                 */
                switch (revision) {
                case 0x11:      /* sLD3 */
                        data->way_ctrl_base = data->ctrl_base + 0x870;
                        break;
                case 0x12:      /* LD4 */
                case 0x16:      /* sld8 */
                        data->way_ctrl_base = data->ctrl_base + 0x840;
                        break;
                default:
                        break;
                }
        }

        data->range_op_max_size -= data->line_size;

        INIT_LIST_HEAD(&data->list);
        list_add_tail(&data->list, &uniphier_cache_list);       /* no mutex */

        /*
         * OK, this level has been successfully initialized.  Look for the next
         * level cache.  Do not roll back even if the initialization of the
         * next level cache fails because we want to continue with available
         * cache levels.
         */
        next_np = of_find_next_cache_node(np);
        if (next_np) {
                (*cache_level)++;
                ret = __uniphier_cache_init(next_np, cache_level);
        }
        of_node_put(next_np);

        return ret;
err:
        iounmap(data->op_base);
        iounmap(data->rev_base);
        iounmap(data->ctrl_base);
        kfree(data);

        return ret;
}
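
/*
 * How these callbacks are reached (a summary of the ARM outercache framework,
 * which lives in <asm/outercache.h>, not in this file): once
 * uniphier_cache_init() below fills in the global outer_cache structure,
 * generic helpers such as outer_inv_range(), outer_clean_range(),
 * outer_flush_range() and outer_sync() dispatch to the uniphier_cache_*
 * functions above, e.g. from the ARM DMA mapping code when a non-coherent
 * buffer is handed to or retrieved from a device.
 */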

int __init uniphier_cache_init(void)
{
        struct device_node *np = NULL;
        unsigned int cache_level;
        int ret = 0;

        /* look for level 2 cache */
        while ((np = of_find_matching_node(np, uniphier_cache_match)))
                if (!of_property_read_u32(np, "cache-level", &cache_level) &&
                    cache_level == 2)
                        break;

        if (!np)
                return -ENODEV;

        ret = __uniphier_cache_init(np, &cache_level);
        of_node_put(np);

        if (ret) {
                /*
                 * Error out only if the L2 initialization fails.  Continue
                 * with any error on L3 or outer because they are optional.
                 */
                if (cache_level == 2) {
                        pr_err("failed to initialize L2 cache\n");
                        return ret;
                }

                cache_level--;
                ret = 0;
        }

        outer_cache.inv_range = uniphier_cache_inv_range;
        outer_cache.clean_range = uniphier_cache_clean_range;
        outer_cache.flush_range = uniphier_cache_flush_range;
        outer_cache.flush_all = uniphier_cache_flush_all;
        outer_cache.disable = uniphier_cache_disable;
        outer_cache.sync = uniphier_cache_sync;

        uniphier_cache_enable();

        pr_info("enabled outer cache (cache level: %d)\n", cache_level);

        return ret;
}