/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
 * handler integration. For more info see Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _LINUX_KFENCE_H
#define _LINUX_KFENCE_H

#include <linux/mm.h>
#include <linux/types.h>

#ifdef CONFIG_KFENCE

#include <linux/atomic.h>
#include <linux/static_key.h>

extern unsigned long kfence_sample_interval;

/*
 * We allocate an even number of pages, as it simplifies calculations to map
 * address to metadata indices; effectively, the very first page serves as an
 * extended guard page, but otherwise has no special purpose.
 */
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
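
/*
 * Worked example (illustrative): with the default CONFIG_KFENCE_NUM_OBJECTS
 * of 255 and a 4 KiB PAGE_SIZE, the pool spans (255 + 1) * 2 = 512 pages,
 * i.e. 2 MiB: one guard page per object page, plus the extra leading guard
 * page accounted for by the "+ 1" above.
 */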

extern char *__kfence_pool;

DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
extern atomic_t kfence_allocation_gate;

/**
 * is_kfence_address() - check if an address belongs to KFENCE pool
 * @addr: address to check
 *
 * Return: true or false depending on whether the address is within the KFENCE
 * object range.
 *
 * KFENCE objects live in a separate page range and are not to be intermixed
 * with regular heap objects (e.g. KFENCE objects must never be added to the
 * allocator freelists). Failing to keep them separate will result in heap
 * corruption; therefore is_kfence_address() must be used to check whether
 * an object requires specific handling.
 *
 * Note: This function may be used in fast-paths, and is performance critical.
 * Future changes should take this into account; for instance, we want to avoid
 * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
 * constant (until immediate patching support is added to the kernel).
 */
static __always_inline bool is_kfence_address(const void *addr)
{
	/*
	 * The __kfence_pool != NULL check is required to deal with the case
	 * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
	 * the slow-path after the range-check!
	 */
	return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
}
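
/*
 * Why the single comparison above suffices (illustrative): the subtraction is
 * evaluated as an unsigned value, so an addr below __kfence_pool wraps around
 * to a very large number and fails the "< KFENCE_POOL_SIZE" test. For
 * example, if __kfence_pool is 0xffff888000000000 and addr is
 * 0xffff887fffffffff, the difference is ULONG_MAX, which is rejected; no
 * separate lower-bound check is needed.
 */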

/**
 * kfence_alloc_pool() - allocate the KFENCE pool via memblock
 */
void __init kfence_alloc_pool(void);

/**
 * kfence_init() - perform KFENCE initialization at boot time
 *
 * Requires that kfence_alloc_pool() was called before. This sets up the
 * allocation gate timer, and requires that workqueues are available.
 */
void __init kfence_init(void);
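
/*
 * Boot-time ordering sketch (illustrative; the actual call sites live in
 * init/main.c):
 *
 *	start_kernel()
 *		mm_init()
 *			kfence_alloc_pool()	- memblock still available
 *		...
 *		kfence_init()			- workqueues are available
 */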

/**
 * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
 * @s: cache being shut down
 *
 * Before shutting down a cache, one must ensure there are no remaining objects
 * allocated from it. Because KFENCE objects are not referenced from the cache
 * directly, we need to check them here.
 *
 * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
 * not return if allocated objects still exist: it prints an error message and
 * simply aborts destruction of a cache, leaking memory.
 *
 * If the only such objects are KFENCE objects, we will not leak the entire
 * cache, but instead try to provide more useful debug info by making allocated
 * objects "zombie allocations". Objects may then still be used or freed (which
 * is handled gracefully), but any use will result in a KFENCE error report,
 * which includes stack traces to the user of the object, the original
 * allocation site, and the caller of shutdown_cache().
 */
void kfence_shutdown_cache(struct kmem_cache *s);

/*
 * Allocate a KFENCE object. Allocators must not call this function directly;
 * use kfence_alloc() instead.
 */
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

/**
 * kfence_alloc() - allocate a KFENCE object with a low probability
 * @s:     struct kmem_cache with object requirements
 * @size:  exact size of the object to allocate (can be less than @s->size
 *         e.g. for kmalloc caches)
 * @flags: GFP flags
 *
 * Return:
 * * NULL     - must proceed with allocating as usual,
 * * non-NULL - pointer to a KFENCE object.
 *
 * kfence_alloc() should be inserted into the heap allocation fast path,
 * allowing it to transparently return KFENCE-allocated objects with a low
 * probability using a static branch (the probability is controlled by the
 * kfence.sample_interval boot parameter).
 */
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
	if (!static_branch_unlikely(&kfence_allocation_key))
		return NULL;
#else
	if (!static_branch_likely(&kfence_allocation_key))
		return NULL;
#endif
	if (likely(atomic_read(&kfence_allocation_gate)))
		return NULL;
	return __kfence_alloc(s, size, flags);
}
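
/*
 * Fast-path integration sketch (illustrative; my_regular_slab_alloc() is a
 * hypothetical stand-in for the allocator's own slow path):
 *
 *	static void *my_slab_alloc(struct kmem_cache *s, gfp_t flags, size_t size)
 *	{
 *		void *obj = kfence_alloc(s, size, flags);
 *
 *		if (unlikely(obj))
 *			return obj;
 *		return my_regular_slab_alloc(s, flags, size);
 *	}
 */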

/**
 * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
 * @addr: pointer to a heap object
 *
 * Return:
 * * 0     - not a KFENCE object, must call __ksize() instead,
 * * non-0 - this many bytes can be accessed without causing a memory error.
 *
 * kfence_ksize() returns the number of bytes requested for a KFENCE object at
 * allocation time. This number may be less than the object size of the
 * corresponding struct kmem_cache.
 */
size_t kfence_ksize(const void *addr);
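
/*
 * Usage sketch (illustrative): a ksize() implementation can query KFENCE
 * first and fall back to __ksize() for regular heap objects:
 *
 *	size = kfence_ksize(objp) ?: __ksize(objp);
 */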

/**
 * kfence_object_start() - find the beginning of a KFENCE object
 * @addr: address within a KFENCE-allocated object
 *
 * Return: address of the beginning of the object.
 *
 * SL[AU]B-allocated objects are laid out within a page one by one, so it is
 * easy to calculate the beginning of an object given a pointer inside it and
 * the object size. The same is not true for KFENCE, which places a single
 * object at either end of the page. This helper function is used to find the
 * beginning of a KFENCE-allocated object.
 */
void *kfence_object_start(const void *addr);
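
/*
 * Usage sketch (illustrative; my_slab_object_start() is a hypothetical
 * stand-in for the allocator's own layout-based calculation):
 *
 *	void *start = is_kfence_address(ptr) ? kfence_object_start(ptr)
 *					     : my_slab_object_start(slab, ptr);
 */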

/**
 * __kfence_free() - release a KFENCE heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Requires: is_kfence_address(addr)
 *
 * Release a KFENCE object and mark it as freed.
 */
void __kfence_free(void *addr);

/**
 * kfence_free() - try to release an arbitrary heap object to KFENCE pool
 * @addr: object to be freed
 *
 * Return:
 * * false - object doesn't belong to KFENCE pool and was ignored,
 * * true  - object was released to KFENCE pool.
 *
 * Release a KFENCE object and mark it as freed. May be called on any object,
 * even non-KFENCE objects, to simplify integration of the hooks into the
 * allocator's free codepath. The allocator must check the return value to
 * determine if it was a KFENCE object or not.
 */
static __always_inline __must_check bool kfence_free(void *addr)
{
	if (!is_kfence_address(addr))
		return false;
	__kfence_free(addr);
	return true;
}
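
/*
 * Free-path integration sketch (illustrative; my_regular_cache_free() is a
 * hypothetical stand-in for the allocator's own release path):
 *
 *	void my_cache_free(struct kmem_cache *s, void *ptr)
 *	{
 *		if (kfence_free(ptr))
 *			return;
 *		my_regular_cache_free(s, ptr);
 *	}
 */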

/**
 * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
 * @addr: faulting address
 * @is_write: is the access a write
 * @regs: current struct pt_regs (can be NULL, but shows full stack trace)
 *
 * Return:
 * * false - address outside KFENCE pool,
 * * true  - page fault handled by KFENCE, no additional handling required.
 *
 * A page fault inside KFENCE pool indicates a memory error, such as an
 * out-of-bounds access, a use-after-free or an invalid memory access. In these
 * cases KFENCE prints an error message and marks the offending page as
 * present, so that the kernel can proceed.
 */
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
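
/*
 * Fault-handler integration sketch (illustrative; my_arch_kernel_oops() is a
 * hypothetical stand-in for the architecture's fatal-fault path):
 *
 *	if (kfence_handle_page_fault(addr, is_write, regs))
 *		return;
 *	my_arch_kernel_oops(addr, regs);
 */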

#ifdef CONFIG_PRINTK

struct kmem_obj_info;

/**
 * __kfence_obj_info() - fill kmem_obj_info struct
 * @kpp: kmem_obj_info to be filled
 * @object: the object
 * @slab: the &struct slab the object was allocated from
 *
 * Return:
 * * false - not a KFENCE object
 * * true  - a KFENCE object, filled @kpp
 *
 * Copies information to @kpp for KFENCE objects.
 */
bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
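
/*
 * Usage sketch (illustrative; my_slab_obj_info() is a hypothetical stand-in
 * for the allocator-specific implementation):
 *
 *	if (__kfence_obj_info(kpp, object, slab))
 *		return;
 *	my_slab_obj_info(kpp, object, slab);
 */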

#endif

#else /* CONFIG_KFENCE */

static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
							 struct pt_regs *regs)
{
	return false;
}

#ifdef CONFIG_PRINTK
struct kmem_obj_info;
static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	return false;
}
#endif

#endif /* CONFIG_KFENCE */

#endif /* _LINUX_KFENCE_H */