cacheflush.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

#include <linux/jump_label.h>

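/* Static branch keys, set up during boot, indicating whether this machine
 * actually has a cache, a data cache and an instruction cache that need to
 * be maintained. */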
DECLARE_STATIC_KEY_TRUE(parisc_has_cache);
DECLARE_STATIC_KEY_TRUE(parisc_has_dcache);
DECLARE_STATIC_KEY_TRUE(parisc_has_icache);

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

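/* Low-level flush and purge primitives, implemented in assembly (pacache.S). */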
void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(const void *addr);
void flush_kernel_icache_page(void *);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

void flush_kernel_dcache_page_addr(const void *addr);

#define flush_kernel_dcache_range(start,size) \
        flush_kernel_dcache_range_asm((start), (start)+(size));

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);

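/* vmap/vunmap ranges may alias other mappings in the virtually indexed
 * caches, so these simply flush everything. */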
#define flush_cache_vmap(start, end)    flush_cache_all()
#define flush_cache_vunmap(start, end)  flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping)         xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)       xa_unlock_irq(&mapping->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags)          \
        xa_lock_irqsave(&mapping->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)     \
        xa_unlock_irqrestore(&mapping->i_pages, flags)

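/* The instruction cache is not coherent with the data cache, so write the
 * data cache back to memory before flushing the instruction cache. */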
#define flush_icache_page(vma,page)     do {            \
        flush_kernel_dcache_page_addr(page_address(page)); \
        flush_kernel_icache_page(page_address(page));   \
} while (0)

#define flush_icache_range(s,e)         do {            \
        flush_kernel_dcache_range_asm(s,e);             \
        flush_kernel_icache_range_asm(s,e);             \
} while (0)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len);
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);

/* Defined in pacache.S, exported in cache.c, used by flush_anon_page(). */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);

#define ARCH_HAS_FLUSH_ON_KUNMAP
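/* With ARCH_HAS_FLUSH_ON_KUNMAP defined, the generic kmap code calls
 * kunmap_flush_on_unmap() when a temporary mapping is torn down, so the
 * kernel alias of the page is flushed before it is reused. */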
static inline void kunmap_flush_on_unmap(const void *addr)
{
        flush_kernel_dcache_page_addr(addr);
}

#endif /* _PARISC_CACHEFLUSH_H */