spitfire.h

/* SPDX-License-Identifier: GPL-2.0 */
/* spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller ([email protected])
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#ifdef CONFIG_SPARC64

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET     0x0000000000000000 /* All chips */
#define TLB_SFSR           0x0000000000000018 /* All chips */
#define TSB_REG            0x0000000000000028 /* All chips */
#define TLB_TAG_ACCESS     0x0000000000000030 /* All chips */
#define VIRT_WATCHPOINT    0x0000000000000038 /* All chips */
#define PHYS_WATCHPOINT    0x0000000000000040 /* All chips */
#define TSB_EXTENSION_P    0x0000000000000048 /* Ultra-III and later */
#define TSB_EXTENSION_S    0x0000000000000050 /* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N    0x0000000000000058 /* Ultra-III and later */
#define TLB_TAG_ACCESS_EXT 0x0000000000000060 /* Ultra-III+ and later */

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT    0x0000000000000008
#define SECONDARY_CONTEXT  0x0000000000000010
#define DMMU_SFAR          0x0000000000000020
#define VIRT_WATCHPOINT    0x0000000000000038
#define PHYS_WATCHPOINT    0x0000000000000040

#define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT  (16 - 1)

#define L1DCACHE_SIZE 0x4000

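/* Chip type identifiers for sun4v (hypervisor) machines; the value
 * detected at boot is stored in sun4v_chip_type, declared below.
 */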
#define SUN4V_CHIP_INVALID  0x00
#define SUN4V_CHIP_NIAGARA1 0x01
#define SUN4V_CHIP_NIAGARA2 0x02
#define SUN4V_CHIP_NIAGARA3 0x03
#define SUN4V_CHIP_NIAGARA4 0x04
#define SUN4V_CHIP_NIAGARA5 0x05
#define SUN4V_CHIP_SPARC_M6 0x06
#define SUN4V_CHIP_SPARC_M7 0x07
#define SUN4V_CHIP_SPARC_M8 0x08
#define SUN4V_CHIP_SPARC64X 0x8a
#define SUN4V_CHIP_SPARC_SN 0x8b
#define SUN4V_CHIP_UNKNOWN  0xff

/*
 * The following CPU_ID_xxx constants are used
 * to identify the CPU type in the setup phase
 * (see head_64.S)
 */
#define CPU_ID_NIAGARA1 ('1')
#define CPU_ID_NIAGARA2 ('2')
#define CPU_ID_NIAGARA3 ('3')
#define CPU_ID_NIAGARA4 ('4')
#define CPU_ID_NIAGARA5 ('5')
#define CPU_ID_M6       ('6')
#define CPU_ID_M7       ('7')
#define CPU_ID_M8       ('8')
#define CPU_ID_SONOMA1  ('N')

#ifndef __ASSEMBLY__

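/* Which MMU/TLB implementation the kernel is running on, determined
 * from the CPU type during early boot.
 */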
enum ultra_tlb_layout {
        spitfire = 0,
        cheetah = 1,
        cheetah_plus = 2,
        hypervisor = 3,
};

extern enum ultra_tlb_layout tlb_type;

extern int sun4v_chip_type;

extern int cheetah_pcache_forced_on;
void cheetah_enable_pcache(void);

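/* Index of the highest TLB entry available for locked translations on
 * the running chip type.
 */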
#define sparc64_highest_locked_tlbent() \
        (tlb_type == spitfire ? \
         SPITFIRE_HIGHEST_LOCKED_TLBENT : \
         CHEETAH_HIGHEST_LOCKED_TLBENT)

extern int num_kernel_image_mappings;

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static inline void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline;
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static inline void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}

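/* Spitfire TLB diagnostic accessors: the entry index is shifted into the
 * diagnostic address and the entry is read or written through the
 * ASI_{D,I}TLB_DATA_ACCESS and ASI_{D,I}TLB_TAG_READ ASIs.
 */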
static inline unsigned long spitfire_get_dtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static inline unsigned long spitfire_get_dtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));

        return tag;
}

static inline void spitfire_put_dtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long spitfire_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static inline unsigned long spitfire_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));

        return tag;
}

static inline void spitfire_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

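/* Demap a single page from the nucleus context.  The 0x20 or'd into the
 * demap address selects the nucleus context for the demap-page operation.
 */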
static inline void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static inline void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has "all non-locked" tlb flushes. */
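/* The 0x80 demap address encodes the "demap all" operation. */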
static inline void cheetah_flush_dtlb_all(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static inline void cheetah_flush_itlb_all(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
 * the problem for me. -DaveM
 */
static inline unsigned long cheetah_get_ldtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_litlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_ldtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_TAG_READ));

        return tag;
}

static inline unsigned long cheetah_get_litlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_TAG_READ));

        return tag;
}

static inline void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static inline void cheetah_put_litlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

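/* Generic Cheetah D-TLB accessors: the tlb argument is placed in bits
 * 17:16 of the diagnostic address to select which of the D-TLBs is
 * accessed.
 */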
static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));

        return tag;
}

static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((tlb << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static inline unsigned long cheetah_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static inline unsigned long cheetah_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));

        return tag;
}

static inline void cheetah_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */

#endif /* CONFIG_SPARC64 */

#endif /* !(_SPARC64_SPITFIRE_H) */