cacheflush.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by [email protected] and [email protected]
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

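/*
 * Write back and invalidate the data cache over [start, end). The range is
 * aligned to cache lines and clamped to one cache size: flushd acts on the
 * line the address indexes, so a single cache-sized pass already visits
 * every line.
 */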
static void __flush_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        if (end > start + cpuinfo.dcache_size)
                end = start + cpuinfo.dcache_size;

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ ("   flushd 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
}

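/*
 * Invalidate (without writeback) the data cache over [start, end). initda
 * only discards a line whose tag matches the address, so unlike
 * __flush_dcache() the whole range must be walked; there is no cache-size
 * clamp.
 */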
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ ("   initda 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
}

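/*
 * Invalidate the instruction cache over [start, end), clamped to the cache
 * size as in __flush_dcache(), then flush the pipeline (flushp) so no stale
 * prefetched instructions execute.
 */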
static void __flush_icache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.icache_line_size - 1);
        end += (cpuinfo.icache_line_size - 1);
        end &= ~(cpuinfo.icache_line_size - 1);

        if (end > start + cpuinfo.icache_size)
                end = start + cpuinfo.icache_size;

        for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
                __asm__ __volatile__ ("   flushi %0\n"
                                        : /* Outputs */
                                        : /* Inputs  */ "r"(addr)
                                        /* : No clobber */);
        }
        __asm__ __volatile__ (" flushp\n");
}

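/*
 * Flush any user-space aliases of @page in the current mm: walk the
 * interval tree of VMAs mapping this file offset and flush the page at
 * each shared mapping's virtual address.
 */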
static void flush_aliases(struct address_space *mapping, struct page *page)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        pgoff_t pgoff;

        pgoff = page->index;

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;

                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset,
                        page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}

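/*
 * Lines are not tagged per mm, so the per-mm hooks below cannot flush
 * selectively and simply flush everything.
 */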
void flush_cache_all(void)
{
        __flush_dcache(0, cpuinfo.dcache_size);
        __flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

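/*
 * New instructions are written through the data cache, so write the d-cache
 * back before invalidating the i-cache: the i-cache refill must see the new
 * code in memory.
 */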
void flush_icache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
        __invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);
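/*
 * Typical non-coherent DMA usage of the two exports above (buffer assumed
 * cache-line aligned):
 *
 *      flush_dcache_range(buf, buf + len);       writeback: CPU -> device
 *      <device performs DMA>
 *      invalidate_dcache_range(buf, buf + len);  discard:   device -> CPU
 */
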
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        __flush_dcache(start, end);
        if (vma == NULL || (vma->vm_flags & VM_EXEC))
                __flush_icache(start, end);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long start = (unsigned long) page_address(page);
        unsigned long end = start + PAGE_SIZE;

        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                        unsigned long pfn)
{
        unsigned long start = vmaddr;
        unsigned long end = start + PAGE_SIZE;

        __flush_dcache(start, end);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache(start, end);
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page. This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        unsigned long start = (unsigned long)page_address(page);

        __flush_dcache(start, start + PAGE_SIZE);
}
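
/*
 * Called when the kernel dirties a page-cache page. If the page has no user
 * mappings yet, just clear PG_dcache_clean and defer the flush to
 * update_mmu_cache(); otherwise flush it, and its aliases, right away.
 */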
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping_file(page);

        /* Defer the flush if there are no user mappings to alias yet. */
        if (mapping && !mapping_mapped(mapping)) {
                clear_bit(PG_dcache_clean, &page->flags);
        } else {
                __flush_dcache_page(mapping, page);
                if (mapping) {
                        unsigned long start = (unsigned long)page_address(page);

                        flush_aliases(mapping, page);
                        flush_icache_range(start, start + PAGE_SIZE);
                }
                set_bit(PG_dcache_clean, &page->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);
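
/*
 * Called when a PTE is installed or updated. Reload the TLB entry, then
 * complete any flush that flush_dcache_page() deferred while the page had
 * no user mappings.
 */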
void update_mmu_cache(struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        unsigned long pfn = pte_pfn(pte);
        struct page *page;
        struct address_space *mapping;

        reload_tlb_page(vma, address, pte);

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping_file(page);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);

        if (mapping) {
                flush_aliases(mapping, page);
                if (vma->vm_flags & VM_EXEC)
                        flush_icache_page(vma, page);
        }
}
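
/*
 * Flush the user address range first so no stale alias lines survive, then
 * copy, then flush the kernel destination so the new data is visible
 * through the (differently mapped) user page.
 */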
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        copy_page(vto, vfrom);
        __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
        __flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}
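
/* Same ordering as copy_user_page(), for zeroing a user page. */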
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        clear_page(addr);
        __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
        __flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}
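
/*
 * Copy out of a user page (e.g. for ptrace access): flush the user mapping
 * first so the kernel-side read through @src sees current data.
 */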
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long user_vaddr,
                        void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)src, (unsigned long)src + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)src, (unsigned long)src + len);
}
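
/*
 * Copy into a user page (e.g. for ptrace breakpoints): after the write,
 * push @dst out of the d-cache, and invalidate the i-cache if the mapping
 * is executable so the new instructions get fetched.
 */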
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long user_vaddr,
                        void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)dst, (unsigned long)dst + len);
}