cacheflush.c

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/highmem.h>
#include <linux/kprobes.h>

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 *
 * Return: true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(void)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		icbi((void *)PAGE_OFFSET);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}
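
/*
 * Worked example of the rounding above (illustrative, assuming a
 * 128-byte L1 icache line, i.e. shift = 7): start = 0x1005 and
 * stop = 0x1105 give addr = 0x1000 (start rounded down to a line
 * boundary) and size = 0x105 + 0x7f = 0x184, so size >> 7 = 3 icbi
 * ops at 0x1000, 0x1080 and 0x1100; every line overlapping
 * [start, stop) is invalidated.
 */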

/**
 * flush_icache_range() - Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache())
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);
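
/*
 * Usage sketch (illustrative, not part of this file): a caller that
 * writes instructions to memory, e.g. a JIT-style emitter, flushes the
 * range before jumping to it. buf, insns and len are assumptions for
 * the example:
 *
 *	memcpy(buf, insns, len);
 *	flush_icache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 * after which buf is safe to execute on this CPU.
 */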

#ifdef CONFIG_HIGHMEM
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled.
	 *
	 * Operands: %0/%1 walk the dcbst and icbi loops, %2 is the cache
	 * line count, %3/%5 are the MSR values without/with MSR_DR, and
	 * %4 is the cache line size.
	 */
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"	/* turn data translation off */
		"   isync;\n"
		"0: dcbst 0, %0;\n"	/* write dcache lines back... */
		"   addi  %0, %0, %4;\n"
		"   bdnz  0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi  0, %1;\n"	/* ...then invalidate icache lines */
		"   addi  %1, %1, %4;\n"
		"   bdnz  1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"	/* restore the original MSR */
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
NOKPROBE_SYMBOL(flush_dcache_icache_phys);
#else
static void flush_dcache_icache_phys(unsigned long physaddr)
{
}
#endif
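
/*
 * Sizing note for the loops above (illustrative): with a 4K page and
 * 32-byte L1 dcache lines, nb = 4096 / 32 = 128, so the asm issues
 * 128 dcbst ops followed by 128 icbi ops while translation is off.
 */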

/**
 * __flush_dcache_icache() - Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
static void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p & PAGE_MASK;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

static void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	int nr = compound_nr(page);

	if (!PageHighMem(page)) {
		for (i = 0; i < nr; i++)
			__flush_dcache_icache(lowmem_page_address(page + i));
	} else {
		for (i = 0; i < nr; i++) {
			void *start = kmap_local_page(page + i);

			__flush_dcache_icache(start);
			kunmap_local(start);
		}
	}
}

void flush_dcache_icache_page(struct page *page)
{
	if (flush_coherent_icache())
		return;

	if (PageCompound(page))
		return flush_dcache_icache_hugepage(page);

	if (!PageHighMem(page)) {
		__flush_dcache_icache(lowmem_page_address(page));
	} else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_local_page(page);

		__flush_dcache_icache(start);
		kunmap_local(start);
	} else {
		flush_dcache_icache_phys(page_to_phys(page));
	}
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			    unsigned long addr, int len)
{
	void *maddr;

	maddr = kmap_local_page(page) + (addr & ~PAGE_MASK);
	flush_icache_range((unsigned long)maddr, (unsigned long)maddr + len);
	kunmap_local(maddr);
}
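
/*
 * Usage sketch (illustrative, not part of this file): code patching a
 * user page, in the style of copy_to_user_page(), writes through a
 * temporary mapping and then calls flush_icache_user_page() so stale
 * icache lines are invalidated before the task runs the new code.
 * page, vma, addr, insns and len are the caller's context and are
 * assumptions for the example:
 *
 *	void *maddr = kmap_local_page(page);
 *
 *	memcpy(maddr + offset_in_page(addr), insns, len);
 *	flush_icache_user_page(vma, page, addr, len);
 *	kunmap_local(maddr);
 */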