tlb-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller ([email protected])
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle [email protected]
 * Carsten Langgaard, [email protected]
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}
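
/*
 * Flush every entry in this CPU's TLB: where the core supports tlbinvf
 * and nothing is wired, invalidate the VTLB and each FTLB set directly;
 * otherwise overwrite each entry with a unique, unmapped VPN2 value.
 */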
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
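
/*
 * Flush the TLB entries covering [start, end) in @vma's address space on
 * the local CPU.  Small ranges are probed and knocked out one page pair
 * at a time; larger ranges simply drop the whole MMU context.
 */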
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}
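
/*
 * Flush the TLB entries covering the kernel range [start, end) on the
 * local CPU, falling back to local_flush_tlb_all() for large ranges.
 */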
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}
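
/*
 * Flush the single TLB entry mapping @page in @vma's address space on
 * the local CPU, if the mm has a live context here.
 */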
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;

		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();

	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}
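
/*
 * Install a wired TLB entry with the given EntryLo0/1, EntryHi and
 * PageMask values, bumping the wired register so the entry is never
 * replaced by random writes.
 */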
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);

	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
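/*
 * Probe whether the MMU accepts the huge-page PageMask value; if the
 * write sticks, transparent hugepages can be used on this CPU.
 */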
int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */
int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}
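
/*
 * "ntlb=" kernel command-line parameter: restrict the number of TLB
 * entries available for random replacement by wiring off the rest in
 * tlb_init() below.
 */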
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}
__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */
}
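
/*
 * Boot-time TLB setup: configure the MMU, honour any "ntlb=" restriction,
 * then build the TLB refill exception handler.
 */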
void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
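
/*
 * CPU power-management notifier: reconfigure the TLB when a core exits a
 * low-power state (or fails to enter one), since its MMU state is reset.
 */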
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);