percpu-internal.h 7.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _MM_PERCPU_INTERNAL_H
  3. #define _MM_PERCPU_INTERNAL_H
  4. #include <linux/types.h>
  5. #include <linux/percpu.h>
  6. /*
  7. * pcpu_block_md is the metadata block struct.
  8. * Each chunk's bitmap is split into a number of full blocks.
  9. * All units are in terms of bits.
  10. *
  11. * The scan hint is the largest known contiguous area before the contig hint.
  12. * It is not necessarily the actual largest contig hint though. There is an
  13. * invariant that the scan_hint_start > contig_hint_start iff
  14. * scan_hint == contig_hint. This is necessary because when scanning forward,
  15. * we don't know if a new contig hint would be better than the current one.
  16. */
/*
 * Per-block metadata.  All units are in terms of bits within the block's
 * slice of the chunk allocation bitmap.
 */
struct pcpu_block_md {
	int	scan_hint;		/* scan hint for block */
	int	scan_hint_start;	/* block relative starting
					   position of the scan hint */
	int	contig_hint;		/* contig hint for block */
	int	contig_hint_start;	/* block relative starting
					   position of the contig hint */
	int	left_free;		/* size of free space along
					   the left side of the block */
	int	right_free;		/* size of free space along
					   the right side of the block */
	int	first_free;		/* block position of first free */
	int	nr_bits;		/* total bits responsible for */
};
/*
 * A chunk of percpu memory: one unit of address space replicated per
 * possible CPU.  Allocation state is tracked in alloc_map/bound_map with
 * per-block metadata in md_blocks; chunk_md aggregates hints chunk-wide.
 */
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size;	/* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_bytes;	/* free bytes in the chunk */
	struct pcpu_block_md	chunk_md;	/* chunk-wide hint metadata */
	void			*base_addr;	/* base address of this chunk */

	unsigned long		*alloc_map;	/* allocation map */
	unsigned long		*bound_map;	/* boundary map */
	struct pcpu_block_md	*md_blocks;	/* metadata blocks */

	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	bool			isolated;	/* isolated from active chunk
						   slots */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup	**obj_cgroups;	/* vector of object cgroups */
#endif

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */
	unsigned long		populated[];	/* populated bitmap */
};
/* Protects pcpu_stats and chunk allocation state (see lockdep asserts below). */
extern spinlock_t pcpu_lock;

/* Chunk lists grouped by amount of free space; pcpu_nr_slots entries. */
extern struct list_head *pcpu_chunk_lists;
extern int pcpu_nr_slots;
/* NOTE(review): special slot indices for sidelined/to-depopulate chunks —
   exact list semantics live in percpu.c; confirm there. */
extern int pcpu_sidelined_slot;
extern int pcpu_to_depopulate_slot;

extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
  69. /**
  70. * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
  71. * @chunk: chunk of interest
  72. *
  73. * This conversion is from the number of physical pages that the chunk
  74. * serves to the number of bitmap blocks used.
  75. */
  76. static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
  77. {
  78. return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
  79. }
  80. /**
  81. * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
  82. * @pages: number of physical pages
  83. *
  84. * This conversion is from physical pages to the number of bits
  85. * required in the bitmap.
  86. */
  87. static inline int pcpu_nr_pages_to_map_bits(int pages)
  88. {
  89. return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
  90. }
  91. /**
  92. * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
  93. * @chunk: chunk of interest
  94. *
  95. * This conversion is from the number of physical pages that the chunk
  96. * serves to the number of bits in the bitmap.
  97. */
  98. static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
  99. {
  100. return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
  101. }
  102. /**
  103. * pcpu_obj_full_size - helper to calculate size of each accounted object
  104. * @size: size of area to allocate in bytes
  105. *
  106. * For each accounted object there is an extra space which is used to store
  107. * obj_cgroup membership. Charge it too.
  108. */
  109. static inline size_t pcpu_obj_full_size(size_t size)
  110. {
  111. size_t extra_size = 0;
  112. #ifdef CONFIG_MEMCG_KMEM
  113. extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
  114. #endif
  115. return size * num_possible_cpus() + extra_size;
  116. }
  117. #ifdef CONFIG_PERCPU_STATS
  118. #include <linux/spinlock.h>
/* Global allocator statistics, updated under pcpu_lock. */
struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
/* Snapshot of the boot-time allocation info (minus the flexible array). */
extern struct pcpu_alloc_info pcpu_stats_ai;
  131. /*
  132. * For debug purposes. We don't care about the flexible array.
  133. */
  134. static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
  135. {
  136. memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
  137. /* initialize min_alloc_size to unit_size */
  138. pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
  139. }
  140. /*
  141. * pcpu_stats_area_alloc - increment area allocation stats
  142. * @chunk: the location of the area being allocated
  143. * @size: size of area to allocate in bytes
  144. *
  145. * CONTEXT:
  146. * pcpu_lock.
  147. */
  148. static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
  149. {
  150. lockdep_assert_held(&pcpu_lock);
  151. pcpu_stats.nr_alloc++;
  152. pcpu_stats.nr_cur_alloc++;
  153. pcpu_stats.nr_max_alloc =
  154. max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
  155. pcpu_stats.min_alloc_size =
  156. min(pcpu_stats.min_alloc_size, size);
  157. pcpu_stats.max_alloc_size =
  158. max(pcpu_stats.max_alloc_size, size);
  159. chunk->nr_alloc++;
  160. chunk->max_alloc_size = max(chunk->max_alloc_size, size);
  161. }
  162. /*
  163. * pcpu_stats_area_dealloc - decrement allocation stats
  164. * @chunk: the location of the area being deallocated
  165. *
  166. * CONTEXT:
  167. * pcpu_lock.
  168. */
  169. static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
  170. {
  171. lockdep_assert_held(&pcpu_lock);
  172. pcpu_stats.nr_dealloc++;
  173. pcpu_stats.nr_cur_alloc--;
  174. chunk->nr_alloc--;
  175. }
  176. /*
  177. * pcpu_stats_chunk_alloc - increment chunk stats
  178. */
  179. static inline void pcpu_stats_chunk_alloc(void)
  180. {
  181. unsigned long flags;
  182. spin_lock_irqsave(&pcpu_lock, flags);
  183. pcpu_stats.nr_chunks++;
  184. pcpu_stats.nr_max_chunks =
  185. max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
  186. spin_unlock_irqrestore(&pcpu_lock, flags);
  187. }
  188. /*
  189. * pcpu_stats_chunk_dealloc - decrement chunk stats
  190. */
  191. static inline void pcpu_stats_chunk_dealloc(void)
  192. {
  193. unsigned long flags;
  194. spin_lock_irqsave(&pcpu_lock, flags);
  195. pcpu_stats.nr_chunks--;
  196. spin_unlock_irqrestore(&pcpu_lock, flags);
  197. }
  198. #else
/* !CONFIG_PERCPU_STATS: stats collection compiles away to empty inlines. */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}
  214. #endif /* !CONFIG_PERCPU_STATS */
  215. #endif