// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
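
/*
 * The HPTE is stored big-endian, and the lock is a software bit in the
 * low-order byte of the valid doubleword. The bitops used by
 * native_lock_hpte() below number bits from the LSB of the native long,
 * so the same architected bit is bit 3 on big-endian but bit 56+3 once
 * the doubleword has been byte-swapped on little-endian.
 */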

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

#ifdef CONFIG_LOCKDEP
static struct lockdep_map hpte_lock_map =
	STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);

static void acquire_hpte_lock(void)
{
	lock_map_acquire(&hpte_lock_map);
}

static void release_hpte_lock(void)
{
	lock_map_release(&hpte_lock_map);
}
#else
static void acquire_hpte_lock(void)
{
}

static void release_hpte_lock(void)
{
}
#endif
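
/*
 * ___tlbie() encodes the (vpn, page size, segment size) tuple into the RB
 * operand expected by the tlbie instruction and issues a single global
 * invalidation, returning the encoded VA so callers can trace it.
 */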
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
						int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}
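
/*
 * Workarounds for POWER9 tlbie errata (named by the two CPU feature bits
 * checked below): with CPU_FTR_P9_TLBIE_ERAT_BUG the preceding tlbie is
 * followed by a dummy partition-scoped radix flush, and with
 * CPU_FTR_P9_TLBIE_STQ_BUG the tlbie is simply issued a second time.
 * Both cases need an extra ptesync for ordering.
 */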
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
				   int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Radix flush for a hash guest */
		unsigned long rb, rs, prs, r, ric;

		rb = PPC_BIT(52); /* IS = 2 */
		rs = 0;		  /* lpid = 0 */
		prs = 0;	  /* partition scoped */
		r = 1;		  /* radix format */
		ric = 0;	  /* RIC_FLUSH_TLB */

		/*
		 * Need the extra ptesync to make sure we don't
		 * re-order the tlbie
		 */
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
			     : : "r"(rb), "i"(r), "i"(prs),
			       "i"(ric), "r"(rs) : "memory");
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}
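
/*
 * __tlbiel() is the local form: tlbiel only invalidates the translation
 * on the executing CPU, so it needs no global synchronisation and no
 * native_tlbie_lock.
 */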
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}
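
/*
 * tlbie() picks between the local and global forms: tlbiel is used only
 * when the caller asked for a local flush, the CPU supports tlbiel for
 * this page size, and no CXL contexts are live. CPUs without
 * MMU_FTR_LOCKLESS_TLBIE additionally serialise global tlbies behind
 * native_tlbie_lock.
 */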
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		ppc_after_tlbiel_barrier();
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
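
/*
 * Each HPTE is locked individually by setting HPTE_LOCK_BIT in its valid
 * doubleword. The lock is dropped either explicitly via
 * native_unlock_hpte() or implicitly by overwriting hptep->v: insert,
 * remove and invalidate pair release_hpte_lock() with the store that
 * rewrites the valid word.
 */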
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	acquire_hpte_lock();
	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	release_hpte_lock();
	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
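
/*
 * Insert a translation into an empty slot of the given PTEG. Returns the
 * slot index within the group, with bit 3 set if the entry went into the
 * secondary hash, or -1 if the group is full. A caller typically derives
 * hpte_group from the hash, e.g. (sketch, mirroring native_hpte_find()
 * below):
 *
 *	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 *	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 */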
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP) {
		local_irq_restore(flags);
		return -1;
	}

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	release_hpte_lock();
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	local_irq_restore(flags);

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
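
/*
 * Evict one non-bolted entry from the given PTEG to make room, starting
 * the search at a pseudo-random offset taken from the timebase. Returns
 * the victim's offset within the group, or -1 if every entry is bolted.
 * Note this does not invalidate the TLB; callers rely on the old
 * translation staying usable until they flush it themselves.
 */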
static long native_hpte_remove(unsigned long hpte_group)
{
	unsigned long hpte_v, flags;
	struct hash_pte *hptep;
	int i;
	int slot_offset;

	local_irq_save(flags);

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP) {
		i = -1;
		goto out;
	}

	/* Invalidate the hpte. NOTE: this also unlocks it */
	release_hpte_lock();
	hptep->v = 0;
out:
	local_irq_restore(flags);
	return i;
}
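
/*
 * Update the protection bits of the HPTE at the given slot, provided it
 * still matches the expected vpn. Returns 0 on success and -1 if the
 * entry was gone or did not match, letting the caller fall back to the
 * full fault path.
 */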
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;
	unsigned long irqflags;

	local_irq_save(irqflags);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(irqflags);

	return ret;
}
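
/*
 * Linear scan of one PTEG for a valid HPTE whose AVPN matches want_v;
 * returns the matching global slot number or -1.
 */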
static long __native_hpte_find(unsigned long want_v, unsigned long slot)
{
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hpte_group;
	unsigned long want_v;
	unsigned long hash;
	long slot;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/*
	 * We try to keep bolted entries always in the primary hash,
	 * but in some cases we can find them in the secondary too.
	 */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __native_hpte_find(want_v, hpte_group);
	if (slot < 0) {
		/* Try in secondary */
		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = __native_hpte_find(want_v, hpte_group);
		if (slot < 0)
			return -1;
	}

	return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;
	unsigned long flags;

	local_irq_save(flags);

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. Bolted entries' base and
	 * actual page sizes will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);

	local_irq_restore(flags);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;
	unsigned long flags;

	local_irq_save(flags);

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);

	local_irq_restore(flags);

	return 0;
}
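
/*
 * Invalidate the HPTE at the given slot if it still matches vpn, then
 * flush the TLB entry regardless of whether the HPTE was found (see the
 * comment below on evicted-but-not-flushed translations).
 */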
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* Invalidate the hpte. NOTE: this also unlocks it */
			release_hpte_lock();
			hptep->v = 0;
		} else
			native_unlock_hpte(hptep);
	}

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}
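
/*
 * Invalidate all HPTEs backing one hugepage: walk the per-sub-page
 * hpte_slot_array, locate each valid HPTE from its recorded hash index,
 * clear it, and flush the corresponding TLB entry.
 */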
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/* Invalidate the hpte. NOTE: this also unlocks it */
				release_hpte_lock();
				hptep->v = 0;
			} else
				native_unlock_hpte(hptep);
		}
		/*
		 * We need to do a tlb invalidate for all the addresses; the
		 * tlbie instruction compares entry_VA in tlb with the VA
		 * specified here
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif
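
/*
 * Reverse-engineer (psize, apsize, ssize, vpn) from a raw HPTE and its
 * slot number. Used by native_hpte_clear(), which has nothing but the
 * hardware hash table to work from.
 */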
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock; on POWER5 and newer, not taking it is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static notrace void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call ___tlbie() here rather than tlbie() since we can't
		 * take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				continue;
			/* lock and try again */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else {
				release_hpte_lock();
				hptep->v = 0;
			}

		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		ppc_after_tlbiel_barrier();
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Just do one more with the last used values.
		 */
		fixup_tlbie_vpn(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
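
/*
 * Wire the native (bare-metal) implementations into mmu_hash_ops at boot.
 */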
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert = native_hpte_insert;
	mmu_hash_ops.hpte_remove = native_hpte_remove;
	mmu_hash_ops.hpte_clear_all = native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
}