/* arch/parisc/kernel/cache.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <[email protected]> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf ([email protected])
 *
 * Cache and TLB management
 *
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
int split_tlb __ro_after_init;		/* set in parisc_cache_init(): non-zero if I and D TLB must be purged separately */
int dcache_stride __ro_after_init;	/* dcache flush-loop stride, derived from PDC cache info */
int icache_stride __ro_after_init;	/* icache flush-loop stride, derived from PDC cache info */
EXPORT_SYMBOL(dcache_stride);

/* Implemented in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

/* Non-zero when TLB flushes must be serialized (see comment above). */
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

/* Cache/TLB geometry as reported by PDC firmware at boot. */
struct pdc_cache_info cache_info __ro_after_init;

/* Block TLB info; only pre-PA2.0 machines have a BTLB. */
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

/* Static keys, disabled at boot on machines lacking the given cache. */
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);
/*
 * Flush this CPU's instruction and data caches.  No cross-CPU
 * broadcast; used as an on_each_cpu() callback (@dummy is unused).
 */
static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}
/* Flush both caches on the calling CPU only. */
void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}
  68. void flush_cache_all(void)
  69. {
  70. if (static_branch_likely(&parisc_has_cache))
  71. on_each_cpu(cache_flush_local_cpu, NULL, 1);
  72. }
  73. static inline void flush_data_cache(void)
  74. {
  75. if (static_branch_likely(&parisc_has_dcache))
  76. on_each_cpu(flush_data_cache_local, NULL, 1);
  77. }
/* Kernel virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

/*
 * Called when a PTE is (re)installed: if the backing page of a file
 * mapping was dirtied through the kernel mapping (PG_dcache_dirty set
 * by flush_dcache_page()), write it back now so the new user mapping
 * sees current data.
 */
void
__update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		/* Deferred flush of the kernel alias; clear the marker. */
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}
  98. void
  99. show_cache_info(struct seq_file *m)
  100. {
  101. char buf[32];
  102. seq_printf(m, "I-cache\t\t: %ld KB\n",
  103. cache_info.ic_size/1024 );
  104. if (cache_info.dc_loop != 1)
  105. snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
  106. seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
  107. cache_info.dc_size/1024,
  108. (cache_info.dc_conf.cc_wt ? "WT":"WB"),
  109. (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
  110. ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
  111. cache_info.dc_conf.cc_alias
  112. );
  113. seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
  114. cache_info.it_size,
  115. cache_info.dt_size,
  116. cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
  117. );
  118. #ifndef CONFIG_PA20
  119. /* BTLB - Block TLB */
  120. if (btlb_info.max_size==0) {
  121. seq_printf(m, "BTLB\t\t: not supported\n" );
  122. } else {
  123. seq_printf(m,
  124. "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
  125. "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
  126. "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
  127. btlb_info.max_size, (int)4096,
  128. btlb_info.max_size>>8,
  129. btlb_info.fixed_range_info.num_i,
  130. btlb_info.fixed_range_info.num_d,
  131. btlb_info.fixed_range_info.num_comb,
  132. btlb_info.variable_range_info.num_i,
  133. btlb_info.variable_range_info.num_d,
  134. btlb_info.variable_range_info.num_comb
  135. );
  136. }
  137. #endif
  138. }
  139. void __init
  140. parisc_cache_init(void)
  141. {
  142. if (pdc_cache_info(&cache_info) < 0)
  143. panic("parisc_cache_init: pdc_cache_info failed");
  144. #if 0
  145. printk("ic_size %lx dc_size %lx it_size %lx\n",
  146. cache_info.ic_size,
  147. cache_info.dc_size,
  148. cache_info.it_size);
  149. printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
  150. cache_info.dc_base,
  151. cache_info.dc_stride,
  152. cache_info.dc_count,
  153. cache_info.dc_loop);
  154. printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
  155. *(unsigned long *) (&cache_info.dc_conf),
  156. cache_info.dc_conf.cc_alias,
  157. cache_info.dc_conf.cc_block,
  158. cache_info.dc_conf.cc_line,
  159. cache_info.dc_conf.cc_shift);
  160. printk(" wt %d sh %d cst %d hv %d\n",
  161. cache_info.dc_conf.cc_wt,
  162. cache_info.dc_conf.cc_sh,
  163. cache_info.dc_conf.cc_cst,
  164. cache_info.dc_conf.cc_hv);
  165. printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
  166. cache_info.ic_base,
  167. cache_info.ic_stride,
  168. cache_info.ic_count,
  169. cache_info.ic_loop);
  170. printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
  171. cache_info.it_sp_base,
  172. cache_info.it_sp_stride,
  173. cache_info.it_sp_count,
  174. cache_info.it_loop,
  175. cache_info.it_off_base,
  176. cache_info.it_off_stride,
  177. cache_info.it_off_count);
  178. printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
  179. cache_info.dt_sp_base,
  180. cache_info.dt_sp_stride,
  181. cache_info.dt_sp_count,
  182. cache_info.dt_loop,
  183. cache_info.dt_off_base,
  184. cache_info.dt_off_stride,
  185. cache_info.dt_off_count);
  186. printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
  187. *(unsigned long *) (&cache_info.ic_conf),
  188. cache_info.ic_conf.cc_alias,
  189. cache_info.ic_conf.cc_block,
  190. cache_info.ic_conf.cc_line,
  191. cache_info.ic_conf.cc_shift);
  192. printk(" wt %d sh %d cst %d hv %d\n",
  193. cache_info.ic_conf.cc_wt,
  194. cache_info.ic_conf.cc_sh,
  195. cache_info.ic_conf.cc_cst,
  196. cache_info.ic_conf.cc_hv);
  197. printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
  198. cache_info.dt_conf.tc_sh,
  199. cache_info.dt_conf.tc_page,
  200. cache_info.dt_conf.tc_cst,
  201. cache_info.dt_conf.tc_aid,
  202. cache_info.dt_conf.tc_sr);
  203. printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
  204. cache_info.it_conf.tc_sh,
  205. cache_info.it_conf.tc_page,
  206. cache_info.it_conf.tc_cst,
  207. cache_info.it_conf.tc_aid,
  208. cache_info.it_conf.tc_sr);
  209. #endif
  210. split_tlb = 0;
  211. if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
  212. if (cache_info.dt_conf.tc_sh == 2)
  213. printk(KERN_WARNING "Unexpected TLB configuration. "
  214. "Will flush I/D separately (could be optimized).\n");
  215. split_tlb = 1;
  216. }
  217. /* "New and Improved" version from Jim Hull
  218. * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
  219. * The following CAFL_STRIDE is an optimized version, see
  220. * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
  221. * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
  222. */
  223. #define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
  224. dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
  225. icache_stride = CAFL_STRIDE(cache_info.ic_conf);
  226. #undef CAFL_STRIDE
  227. #ifndef CONFIG_PA20
  228. if (pdc_btlb_info(&btlb_info) < 0) {
  229. memset(&btlb_info, 0, sizeof btlb_info);
  230. }
  231. #endif
  232. if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
  233. PDC_MODEL_NVA_UNSUPPORTED) {
  234. printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
  235. #if 0
  236. panic("SMP kernel required to avoid non-equivalent aliasing");
  237. #endif
  238. }
  239. }
  240. void disable_sr_hashing(void)
  241. {
  242. int srhash_type, retval;
  243. unsigned long space_bits;
  244. switch (boot_cpu_data.cpu_type) {
  245. case pcx: /* We shouldn't get this far. setup.c should prevent it. */
  246. BUG();
  247. return;
  248. case pcxs:
  249. case pcxt:
  250. case pcxt_:
  251. srhash_type = SRHASH_PCXST;
  252. break;
  253. case pcxl:
  254. srhash_type = SRHASH_PCXL;
  255. break;
  256. case pcxl2: /* pcxl2 doesn't support space register hashing */
  257. return;
  258. default: /* Currently all PA2.0 machines use the same ins. sequence */
  259. srhash_type = SRHASH_PA20;
  260. break;
  261. }
  262. disable_sr_hashing_asm(srhash_type);
  263. retval = pdc_spaceid_bits(&space_bits);
  264. /* If this procedure isn't implemented, don't panic. */
  265. if (retval < 0 && retval != PDC_BAD_OPTION)
  266. panic("pdc_spaceid_bits call failed.\n");
  267. if (space_bits != 0)
  268. panic("SpaceID hashing is still on!\n");
  269. }
/*
 * Flush one user page given its physical address, using the
 * physical-address based asm flush routines (see the aliasing comment
 * in flush_dcache_page()).  The icache is flushed too for executable
 * mappings.  Preemption is disabled across the asm calls so the flush
 * completes on one CPU.
 */
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
/*
 * Flush one page of a user address space by temporarily installing
 * that space's MMU context on this CPU and using the ordinary
 * user-range flush routines.  Used on machines where
 * parisc_requires_coherency() is true.
 *
 * The save/switch/restore of the context registers must not be
 * interrupted, hence the local_irq_save/restore brackets; preemption
 * stays disabled for the whole borrowed-context window.
 */
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
	local_irq_save(flags);
	/* Save this CPU's current context: %cr8, user space register,
	 * %cr25 (and %cr28 when the page-table lock lives there). */
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}
  315. static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
  316. {
  317. pte_t *ptep = NULL;
  318. pgd_t *pgd = mm->pgd;
  319. p4d_t *p4d;
  320. pud_t *pud;
  321. pmd_t *pmd;
  322. if (!pgd_none(*pgd)) {
  323. p4d = p4d_offset(pgd, addr);
  324. if (!p4d_none(*p4d)) {
  325. pud = pud_offset(p4d, addr);
  326. if (!pud_none(*pud)) {
  327. pmd = pmd_offset(pud, addr);
  328. if (!pmd_none(*pmd))
  329. ptep = pte_offset_map(pmd, addr);
  330. }
  331. }
  332. }
  333. return ptep;
  334. }
  335. static inline bool pte_needs_flush(pte_t pte)
  336. {
  337. return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
  338. == (_PAGE_PRESENT | _PAGE_ACCESSED);
  339. }
/*
 * Make a page-cache page coherent with all its user mappings.
 *
 * If the page's mapping is not mapped into any user address space yet,
 * just mark it PG_dcache_dirty and defer the flush (picked up later in
 * __update_cache()).  Otherwise flush the kernel alias and then every
 * user alias found in the mapping's interval tree.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	unsigned long count = 0;
	unsigned long flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		/* No user mappings: defer the flush. */
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page_addr(page_address(page));

	if (!mapping)
		return;

	pgoff = page->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;
		if (parisc_requires_coherency()) {
			pte_t *ptep;

			ptep = get_ptep(mpnt->vm_mm, addr);
			if (ptep && pte_needs_flush(*ptep))
				flush_user_cache_page(mpnt, addr);
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush only alias mapping. This guarantees that
			 * the page is no-longer in the cache for any
			 * process and nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course)
			 */
			flush_tlb_page(mpnt, addr);
			/* Only flush once per cache colour. */
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				__flush_cache_page(mpnt, addr, page_to_phys(page));
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(mpnt->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, mpnt->vm_file);
				old_addr = addr;
			}
		}
		/* Sanity check against runaway interval-tree walks. */
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
/* Cache-range flushes at or above this size fall back to flushing the
 * whole cache; tuned at boot by parisc_setup_cache_timing(). */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
/* TLB-range flushes at or above this size use flush_tlb_all() instead;
 * also set by parisc_setup_cache_timing().  ~0UL disables the
 * whole-flush path until tuning has run. */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;
/*
 * Measure at boot how long whole-cache/TLB and range flushes take
 * (using the interval timer, %cr16) and set the break-even thresholds
 * parisc_cache_flush_threshold and parisc_tlb_flush_threshold.
 */
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	/* Time a whole data-cache flush. */
	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	/* Time a range flush over the kernel image. */
	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	/* Time range vs. whole TLB flush over the kernel image. */
	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	/* Never drop below the 16 KiB floor. */
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}
/* Implemented in arch/parisc/kernel/pacache.S */
extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

/*
 * Flush one page at a kernel virtual address, then purge its kernel
 * TLB entry (under the TLB purge lock) so the CPU cannot reload the
 * lines speculatively.
 */
void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
/*
 * Flush @vmaddr in @vma only if a present+accessed PTE still maps it.
 */
static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	if (ptep && pte_needs_flush(*ptep))
		flush_cache_page(vma, vmaddr, pfn);
}
/*
 * Copy a user highmem page.  The source page's user alias is flushed
 * first so the kernel mapping reads current data.  Note the
 * kunmap_local() calls are in reverse (LIFO) order of the
 * kmap_local_page() calls, as kmap_local requires.
 */
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}
  502. void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
  503. unsigned long user_vaddr, void *dst, void *src, int len)
  504. {
  505. flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
  506. memcpy(dst, src, len);
  507. flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
  508. }
/*
 * Read @len bytes from a user page through its kernel mapping; the
 * user alias is flushed first so the kernel sees current data.
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
/* __flush_tlb_range()
 *
 * Flush the TLB for [start, end) in space @sid.
 * returns 1 if all TLBs were flushed (whole-TLB path taken).
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	/* Whole-TLB flush for big ranges — but flush_tlb_all() is an
	 * SMP broadcast, so it is skipped when IRQs are disabled. */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);	/* serialize PxTLB broadcasts */
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}
  541. static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
  542. {
  543. unsigned long addr, pfn;
  544. pte_t *ptep;
  545. for (addr = start; addr < end; addr += PAGE_SIZE) {
  546. /*
  547. * The vma can contain pages that aren't present. Although
  548. * the pte search is expensive, we need the pte to find the
  549. * page pfn and to check whether the page should be flushed.
  550. */
  551. ptep = get_ptep(vma->vm_mm, addr);
  552. if (ptep && pte_needs_flush(*ptep)) {
  553. if (parisc_requires_coherency()) {
  554. flush_user_cache_page(vma, addr);
  555. } else {
  556. pfn = pte_pfn(*ptep);
  557. if (WARN_ON(!pfn_valid(pfn)))
  558. return;
  559. __flush_cache_page(vma, addr, PFN_PHYS(pfn));
  560. }
  561. }
  562. }
  563. }
  564. static inline unsigned long mm_total_size(struct mm_struct *mm)
  565. {
  566. struct vm_area_struct *vma;
  567. unsigned long usize = 0;
  568. VMA_ITERATOR(vmi, mm, 0);
  569. for_each_vma(vmi, vma) {
  570. if (usize >= parisc_cache_flush_threshold)
  571. break;
  572. usize += vma->vm_end - vma->vm_start;
  573. }
  574. return usize;
  575. }
  576. void flush_cache_mm(struct mm_struct *mm)
  577. {
  578. struct vm_area_struct *vma;
  579. VMA_ITERATOR(vmi, mm, 0);
  580. /*
  581. * Flushing the whole cache on each cpu takes forever on
  582. * rp3440, etc. So, avoid it if the mm isn't too big.
  583. *
  584. * Note that we must flush the entire cache on machines
  585. * with aliasing caches to prevent random segmentation
  586. * faults.
  587. */
  588. if (!parisc_requires_coherency()
  589. || mm_total_size(mm) >= parisc_cache_flush_threshold) {
  590. if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
  591. return;
  592. flush_tlb_all();
  593. flush_cache_all();
  594. return;
  595. }
  596. /* Flush mm */
  597. for_each_vma(vmi, vma)
  598. flush_cache_pages(vma, vma->vm_start, vma->vm_end);
  599. }
  600. void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
  601. {
  602. if (!parisc_requires_coherency()
  603. || end - start >= parisc_cache_flush_threshold) {
  604. if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
  605. return;
  606. flush_tlb_range(vma, start, end);
  607. flush_cache_all();
  608. return;
  609. }
  610. flush_cache_pages(vma, start, end);
  611. }
  612. void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
  613. {
  614. if (WARN_ON(!pfn_valid(pfn)))
  615. return;
  616. if (parisc_requires_coherency())
  617. flush_user_cache_page(vma, vmaddr);
  618. else
  619. __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
  620. }
  621. void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
  622. {
  623. if (!PageAnon(page))
  624. return;
  625. if (parisc_requires_coherency()) {
  626. if (vma->vm_flags & VM_SHARED)
  627. flush_data_cache();
  628. else
  629. flush_user_cache_page(vma, vmaddr);
  630. return;
  631. }
  632. flush_tlb_page(vma, vmaddr);
  633. preempt_disable();
  634. flush_dcache_page_asm(page_to_phys(page), vmaddr);
  635. preempt_enable();
  636. }
  637. void flush_kernel_vmap_range(void *vaddr, int size)
  638. {
  639. unsigned long start = (unsigned long)vaddr;
  640. unsigned long end = start + size;
  641. if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
  642. (unsigned long)size >= parisc_cache_flush_threshold) {
  643. flush_tlb_kernel_range(start, end);
  644. flush_data_cache();
  645. return;
  646. }
  647. flush_kernel_dcache_range_asm(start, end);
  648. flush_tlb_kernel_range(start, end);
  649. }
  650. EXPORT_SYMBOL(flush_kernel_vmap_range);
  651. void invalidate_kernel_vmap_range(void *vaddr, int size)
  652. {
  653. unsigned long start = (unsigned long)vaddr;
  654. unsigned long end = start + size;
  655. /* Ensure DMA is complete */
  656. asm_syncdma();
  657. if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
  658. (unsigned long)size >= parisc_cache_flush_threshold) {
  659. flush_tlb_kernel_range(start, end);
  660. flush_data_cache();
  661. return;
  662. }
  663. purge_kernel_dcache_range_asm(start, end);
  664. flush_tlb_kernel_range(start, end);
  665. }
  666. EXPORT_SYMBOL(invalidate_kernel_vmap_range);