  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __LINUX_CACHE_H
  3. #define __LINUX_CACHE_H
  4. #include <uapi/linux/kernel.h>
  5. #include <asm/cache.h>
  6. #ifndef L1_CACHE_ALIGN
  7. #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
  8. #endif
  9. #ifndef SMP_CACHE_BYTES
  10. #define SMP_CACHE_BYTES L1_CACHE_BYTES
  11. #endif
  12. /*
  13. * __read_mostly is used to keep rarely changing variables out of frequently
  14. * updated cachelines. Its use should be reserved for data that is used
  15. * frequently in hot paths. Performance traces can help decide when to use
  16. * this. You want __read_mostly data to be tightly packed, so that in the
  17. * best case multiple frequently read variables for a hot path will be next
  18. * to each other in order to reduce the number of cachelines needed to
  19. * execute a critical path. We should be mindful and selective of its use.
  20. * ie: if you're going to use it please supply a *good* justification in your
  21. * commit log
  22. */
  23. #ifndef __read_mostly
  24. #define __read_mostly
  25. #endif
  26. /*
  27. * __ro_after_init is used to mark things that are read-only after init (i.e.
  28. * after mark_rodata_ro() has been called). These are effectively read-only,
  29. * but may get written to during init, so can't live in .rodata (via "const").
  30. */
  31. #ifndef __ro_after_init
  32. #define __ro_after_init __section(".data..ro_after_init")
  33. #endif
  34. #ifndef ____cacheline_aligned
  35. #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
  36. #endif
  37. #ifndef ____cacheline_aligned_in_smp
  38. #ifdef CONFIG_SMP
  39. #define ____cacheline_aligned_in_smp ____cacheline_aligned
  40. #else
  41. #define ____cacheline_aligned_in_smp
  42. #endif /* CONFIG_SMP */
  43. #endif
  44. #ifndef __cacheline_aligned
  45. #define __cacheline_aligned \
  46. __attribute__((__aligned__(SMP_CACHE_BYTES), \
  47. __section__(".data..cacheline_aligned")))
  48. #endif /* __cacheline_aligned */
  49. #ifndef __cacheline_aligned_in_smp
  50. #ifdef CONFIG_SMP
  51. #define __cacheline_aligned_in_smp __cacheline_aligned
  52. #else
  53. #define __cacheline_aligned_in_smp
  54. #endif /* CONFIG_SMP */
  55. #endif
  56. /*
  57. * The maximum alignment needed for some critical structures
  58. * These could be inter-node cacheline sizes/L3 cacheline
  59. * size etc. Define this in asm/cache.h for your arch
  60. */
  61. #ifndef INTERNODE_CACHE_SHIFT
  62. #define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
  63. #endif
  64. #if !defined(____cacheline_internodealigned_in_smp)
  65. #if defined(CONFIG_SMP)
  66. #define ____cacheline_internodealigned_in_smp \
  67. __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
  68. #else
  69. #define ____cacheline_internodealigned_in_smp
  70. #endif
  71. #endif
  72. #ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
  73. #define cache_line_size() L1_CACHE_BYTES
  74. #endif
  75. /*
  76. * Helper to add padding within a struct to ensure data fall into separate
  77. * cachelines.
  78. */
  79. #if defined(CONFIG_SMP)
  80. struct cacheline_padding {
  81. char x[0];
  82. } ____cacheline_internodealigned_in_smp;
  83. #define CACHELINE_PADDING(name) struct cacheline_padding name
  84. #else
  85. #define CACHELINE_PADDING(name)
  86. #endif
  87. #endif /* __LINUX_CACHE_H */