/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN
#include <linux/linkage.h>
#include <asm/kasan.h>
#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
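/*
 * Illustrative sketch (not part of the upstream header): callers in the
 * vmalloc path combine these __bitwise flags with plain OR, e.g. a fully
 * backed mapping with normal page protections might be handled as:
 *
 *	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
 *
 *	kasan_flags |= KASAN_VMALLOC_VM_ALLOC | KASAN_VMALLOC_PROT_NORMAL;
 *	if (gfp_mask & __GFP_ZERO)
 *		kasan_flags |= KASAN_VMALLOC_INIT;
 *	addr = kasan_unpoison_vmalloc(addr, size, kasan_flags);
 *
 * The __force casts in the definitions keep sparse happy about the
 * __bitwise type while still allowing this kind of composition.
 */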
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
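/*
 * Worked example (illustrative, x86-64 values assumed): generic KASAN uses
 * KASAN_SHADOW_SCALE_SHIFT == 3, so one shadow byte covers an 8-byte
 * granule. With KASAN_SHADOW_OFFSET == 0xdffffc0000000000, an access to
 * 0xffff888000000000 maps to the shadow byte at:
 *
 *	(0xffff888000000000 >> 3) + 0xdffffc0000000000 == 0xffffed1000000000
 *
 * A shadow byte of 0 means all 8 bytes of the granule are addressable,
 * 1..7 means only the first N bytes are, and negative values encode the
 * various kinds of redzones.
 */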
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

struct kasan_cache {
#ifdef CONFIG_KASAN_GENERIC
	int alloc_meta_offset;
	int free_meta_offset;
#endif
	bool is_kmalloc;
};
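/*
 * Each hook below comes in two parts: an out-of-line __kasan_*()
 * implementation and a static __always_inline wrapper that checks
 * kasan_enabled() first. With CONFIG_KASAN_HW_TAGS that check is a static
 * key, so when KASAN is compiled in but not in use the wrappers reduce to
 * a patched-out branch and cost next to nothing on hot paths.
 */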
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}
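/*
 * The boolean result reports whether the pages were actually unpoisoned
 * (KASAN skips some pages, e.g. highmem); the !CONFIG_KASAN stub below
 * always returns false, so callers cannot assume KASAN initialized the
 * memory unless this returns true.
 */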
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
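/*
 * Illustrative sketch (assumed caller, not part of this header): a slab
 * free path consults the return value to decide whether KASAN took the
 * object. Generic KASAN may put it into its quarantine instead of letting
 * it be reused immediately:
 *
 *	static bool slab_free_hook_example(struct kmem_cache *s,
 *					   void *object, bool init)
 *	{
 *		// true: object is quarantined, don't return it to the slab
 *		return kasan_slab_free(s, object, init);
 *	}
 */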
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
		const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}
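/*
 * Illustrative sketch (assumed caller, not part of this header): on a
 * kmalloc() path the allocator runs the generic object hook first and the
 * size-aware hook second, always continuing with the returned pointer,
 * since the tag-based modes may retag it at each step:
 *
 *	void *obj = kasan_slab_alloc(s, raw_obj, flags, init);
 *
 *	obj = kasan_kmalloc(s, obj, orig_size, flags);
 *	return obj;
 *
 * kasan_kmalloc() also poisons the redzone between the requested size and
 * the cache's object size, which is how an out-of-bounds access past
 * kmalloc(13, ...) is caught inside a 16-byte slab object.
 */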
void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
		size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
		size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
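/*
 * Illustrative sketch (assumed caller, not part of this header): callers
 * such as ksize() use kasan_check_byte() to probe whether an object is
 * still valid before touching it, reporting a bug and bailing out rather
 * than crashing on a poisoned object:
 *
 *	if (!kasan_check_byte(object))
 *		return 0;	// use-after-free was already reported
 */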
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

size_t kasan_metadata_size(struct kmem_cache *cache);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return 0;
}

/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}

/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
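/*
 * Worked example (illustrative, arm64 assumed): the tag-based modes store
 * an 8-bit tag in the top byte of the pointer, relying on Top Byte Ignore,
 * so a tagged pointer such as 0xf4ffff8000001000 and its untagged form
 * 0xffffff8000001000 dereference the same memory. kasan_reset_tag() is
 * needed whenever pointers are compared or used arithmetically against
 * untagged addresses:
 *
 *	if (kasan_reset_tag(ptr) == kasan_reset_tag(other))
 *		;	// same object, regardless of tags
 */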
/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size,
					    gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
#endif /* _LINUX_KASAN_H */