/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cpu-info.h>
#include <asm/cacheops.h>
  10. static inline bool cache_present(struct cache_desc *cdesc)
  11. {
  12. return cdesc->flags & CACHE_PRESENT;
  13. }
  14. static inline bool cache_private(struct cache_desc *cdesc)
  15. {
  16. return cdesc->flags & CACHE_PRIVATE;
  17. }
  18. static inline bool cache_inclusive(struct cache_desc *cdesc)
  19. {
  20. return cdesc->flags & CACHE_INCLUSIVE;
  21. }
  22. static inline unsigned int cpu_last_level_cache_line_size(void)
  23. {
  24. int cache_present = boot_cpu_data.cache_leaves_present;
  25. return boot_cpu_data.cache_leaves[cache_present - 1].linesz;
  26. }
/* Flush the entire cache hierarchy of the calling CPU (implemented in asm). */
asmlinkage void __flush_cache_all(void);

/* Synchronize the I-cache with memory for [start, end) on the local CPU. */
void local_flush_icache_range(unsigned long start, unsigned long end);

/*
 * Only the local variant exists; both generic entry points map to it.
 * NOTE(review): presumably icache maintenance does not need cross-CPU
 * broadcast on this architecture — confirm against the SMP implementation.
 */
#define flush_icache_range	local_flush_icache_range
#define flush_icache_user_range	local_flush_icache_range

/* No deferred dcache flushing: flush_dcache_page() is a no-op below. */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

/*
 * All of the following are no-ops, suggesting the data caches need no
 * software maintenance for these events (hedged: hardware coherence is
 * implied by these stubs, not shown in this file).
 */
#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
#define flush_cache_vmap(start, end)			do { } while (0)
#define flush_cache_vunmap(start, end)			do { } while (0)
#define flush_icache_page(vma, page)			do { } while (0)
#define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
#define flush_dcache_page(page)				do { } while (0)
#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)
/*
 * Emit one CACOP cache-maintenance instruction.
 *
 * @op must be a compile-time constant (the "i" immediate constraint);
 * @addr is wired through the "ZC" memory constraint so the assembler
 * receives a proper base+offset memory operand rather than a bare
 * register. The byte dereference only describes the operand to the
 * compiler; the instruction itself operates on the cache line.
 */
#define cache_op(op, addr)						\
	__asm__ __volatile__(						\
	"	cacop	%0, %1					\n"	\
	:								\
	: "i" (op), "ZC" (*(unsigned char *)(addr)))
/*
 * Writeback-and-invalidate one line, selected by index via @addr, in the
 * cache leaf numbered @leaf.
 *
 * One case per leaf is required because cache_op() needs the opcode as a
 * compile-time immediate ("i" constraint), so it cannot be derived from
 * @leaf at run time. Unknown leaf numbers are silently ignored.
 */
static inline void flush_cache_line(int leaf, unsigned long addr)
{
	switch (leaf) {
	case Cache_LEAF0:
		cache_op(Index_Writeback_Inv_LEAF0, addr);
		break;
	case Cache_LEAF1:
		cache_op(Index_Writeback_Inv_LEAF1, addr);
		break;
	case Cache_LEAF2:
		cache_op(Index_Writeback_Inv_LEAF2, addr);
		break;
	case Cache_LEAF3:
		cache_op(Index_Writeback_Inv_LEAF3, addr);
		break;
	case Cache_LEAF4:
		cache_op(Index_Writeback_Inv_LEAF4, addr);
		break;
	case Cache_LEAF5:
		cache_op(Index_Writeback_Inv_LEAF5, addr);
		break;
	default:
		break;
	}
}
#include <asm-generic/cacheflush.h>

#endif /* _ASM_CACHEFLUSH_H */