kmsan.h 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * KMSAN API for subsystems.
  4. *
  5. * Copyright (C) 2017-2022 Google LLC
  6. * Author: Alexander Potapenko <[email protected]>
  7. *
  8. */
  9. #ifndef _LINUX_KMSAN_H
  10. #define _LINUX_KMSAN_H
  11. #include <linux/dma-direction.h>
  12. #include <linux/gfp.h>
  13. #include <linux/kmsan-checks.h>
  14. #include <linux/types.h>
  15. struct page;
  16. struct kmem_cache;
  17. struct task_struct;
  18. struct scatterlist;
  19. struct urb;
  20. #ifdef CONFIG_KMSAN
  21. /**
  22. * kmsan_task_create() - Initialize KMSAN state for the task.
  23. * @task: task to initialize.
  24. */
  25. void kmsan_task_create(struct task_struct *task);
  26. /**
  27. * kmsan_task_exit() - Notify KMSAN that a task has exited.
  28. * @task: task about to finish.
  29. */
  30. void kmsan_task_exit(struct task_struct *task);
  31. /**
  32. * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
  33. *
  34. * Allocate and initialize KMSAN metadata for early allocations.
  35. */
  36. void __init kmsan_init_shadow(void);
  37. /**
  38. * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
  39. */
  40. void __init kmsan_init_runtime(void);
  41. /**
  42. * kmsan_memblock_free_pages() - handle freeing of memblock pages.
  43. * @page: struct page to free.
  44. * @order: order of @page.
  45. *
  46. * Freed pages are either returned to buddy allocator or held back to be used
  47. * as metadata pages.
  48. */
  49. bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
  50. /**
  51. * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
  52. * @page: struct page pointer returned by alloc_pages().
  53. * @order: order of allocated struct page.
  54. * @flags: GFP flags used by alloc_pages()
  55. *
  56. * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
  57. * @flags contain __GFP_ZERO.
  58. */
  59. void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
  60. /**
  61. * kmsan_free_page() - Notify KMSAN about a free_pages() call.
  62. * @page: struct page pointer passed to free_pages().
  63. * @order: order of deallocated struct page.
  64. *
  65. * KMSAN marks freed memory as uninitialized.
  66. */
  67. void kmsan_free_page(struct page *page, unsigned int order);
  68. /**
  69. * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
  70. * @dst: destination page.
  71. * @src: source page.
  72. *
  73. * KMSAN copies the contents of metadata pages for @src into the metadata pages
  74. * for @dst. If @dst has no associated metadata pages, nothing happens.
  75. * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
  76. */
  77. void kmsan_copy_page_meta(struct page *dst, struct page *src);
  78. /**
  79. * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
  80. * @s: slab cache the object belongs to.
  81. * @object: object pointer.
  82. * @flags: GFP flags passed to the allocator.
  83. *
  84. * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
  85. * newly created object, marking it as initialized or uninitialized.
  86. */
  87. void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
  88. /**
  89. * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
  90. * @s: slab cache the object belongs to.
  91. * @object: object pointer.
  92. *
  93. * KMSAN marks the freed object as uninitialized.
  94. */
  95. void kmsan_slab_free(struct kmem_cache *s, void *object);
  96. /**
  97. * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
  98. * @ptr: object pointer.
  99. * @size: object size.
  100. * @flags: GFP flags passed to the allocator.
  101. *
  102. * Similar to kmsan_slab_alloc(), but for large allocations.
  103. */
  104. void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
  105. /**
  106. * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
  107. * @ptr: object pointer.
  108. *
  109. * Similar to kmsan_slab_free(), but for large allocations.
  110. */
  111. void kmsan_kfree_large(const void *ptr);
  112. /**
  113. * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
  114. * @start: start of vmapped range.
  115. * @end: end of vmapped range.
  116. * @prot: page protection flags used for vmap.
  117. * @pages: array of pages.
  118. * @page_shift: page_shift passed to vmap_range_noflush().
  119. *
  120. * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
  121. * vmalloc metadata address range. Returns 0 on success, callers must check
  122. * for non-zero return value.
  123. */
  124. int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
  125. pgprot_t prot, struct page **pages,
  126. unsigned int page_shift);
  127. /**
  128. * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
  129. * @start: start of vunmapped range.
  130. * @end: end of vunmapped range.
  131. *
  132. * KMSAN unmaps the contiguous metadata ranges created by
  133. * kmsan_vmap_pages_range_noflush().
  134. */
  135. void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  136. /**
  137. * kmsan_ioremap_page_range() - Notify KMSAN about a ioremap_page_range() call.
  138. * @addr: range start.
  139. * @end: range end.
  140. * @phys_addr: physical range start.
  141. * @prot: page protection flags used for ioremap_page_range().
  142. * @page_shift: page_shift argument passed to vmap_range_noflush().
  143. *
  144. * KMSAN creates new metadata pages for the physical pages mapped into the
  145. * virtual memory. Returns 0 on success, callers must check for non-zero return
  146. * value.
  147. */
  148. int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
  149. phys_addr_t phys_addr, pgprot_t prot,
  150. unsigned int page_shift);
  151. /**
  152. * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
  153. * @start: range start.
  154. * @end: range end.
  155. *
  156. * KMSAN unmaps the metadata pages for the given range and, unlike for
  157. * vunmap_page_range(), also deallocates them.
  158. */
  159. void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
  160. /**
  161. * kmsan_handle_dma() - Handle a DMA data transfer.
  162. * @page: first page of the buffer.
  163. * @offset: offset of the buffer within the first page.
  164. * @size: buffer size.
  165. * @dir: one of possible dma_data_direction values.
  166. *
  167. * Depending on @dir, KMSAN:
  168. * * checks the buffer, if it is copied to device;
  169. * * initializes the buffer, if it is copied from device;
  170. * * does both, if this is a DMA_BIDIRECTIONAL transfer.
  171. */
  172. void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
  173. enum dma_data_direction dir);
  174. /**
  175. * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
  176. * @sg: scatterlist holding DMA buffers.
  177. * @nents: number of scatterlist entries.
  178. * @dir: one of possible dma_data_direction values.
  179. *
  180. * Depending on @dir, KMSAN:
  181. * * checks the buffers in the scatterlist, if they are copied to device;
  182. * * initializes the buffers, if they are copied from device;
  183. * * does both, if this is a DMA_BIDIRECTIONAL transfer.
  184. */
  185. void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
  186. enum dma_data_direction dir);
  187. /**
  188. * kmsan_handle_urb() - Handle a USB data transfer.
  189. * @urb: struct urb pointer.
  190. * @is_out: data transfer direction (true means output to hardware).
  191. *
  192. * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
  193. * KMSAN initializes the transfer buffer.
  194. */
  195. void kmsan_handle_urb(const struct urb *urb, bool is_out);
  196. /**
  197. * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
  198. * @regs: struct pt_regs pointer received from assembly code.
  199. *
  200. * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
  201. * false positive reports. Unlike kmsan_unpoison_memory(),
  202. * kmsan_unpoison_entry_regs() can be called from the regions where
  203. * kmsan_in_runtime() returns true, which is the case in early entry code.
  204. */
  205. void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
  206. #else
  207. static inline void kmsan_init_shadow(void)
  208. {
  209. }
  210. static inline void kmsan_init_runtime(void)
  211. {
  212. }
  213. static inline bool kmsan_memblock_free_pages(struct page *page,
  214. unsigned int order)
  215. {
  216. return true;
  217. }
  218. static inline void kmsan_task_create(struct task_struct *task)
  219. {
  220. }
  221. static inline void kmsan_task_exit(struct task_struct *task)
  222. {
  223. }
  224. static inline int kmsan_alloc_page(struct page *page, unsigned int order,
  225. gfp_t flags)
  226. {
  227. return 0;
  228. }
  229. static inline void kmsan_free_page(struct page *page, unsigned int order)
  230. {
  231. }
  232. static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
  233. {
  234. }
  235. static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
  236. gfp_t flags)
  237. {
  238. }
  239. static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
  240. {
  241. }
  242. static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
  243. gfp_t flags)
  244. {
  245. }
  246. static inline void kmsan_kfree_large(const void *ptr)
  247. {
  248. }
  249. static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
  250. unsigned long end,
  251. pgprot_t prot,
  252. struct page **pages,
  253. unsigned int page_shift)
  254. {
  255. return 0;
  256. }
  257. static inline void kmsan_vunmap_range_noflush(unsigned long start,
  258. unsigned long end)
  259. {
  260. }
  261. static inline int kmsan_ioremap_page_range(unsigned long start,
  262. unsigned long end,
  263. phys_addr_t phys_addr, pgprot_t prot,
  264. unsigned int page_shift)
  265. {
  266. return 0;
  267. }
  268. static inline void kmsan_iounmap_page_range(unsigned long start,
  269. unsigned long end)
  270. {
  271. }
  272. static inline void kmsan_handle_dma(struct page *page, size_t offset,
  273. size_t size, enum dma_data_direction dir)
  274. {
  275. }
  276. static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
  277. enum dma_data_direction dir)
  278. {
  279. }
  280. static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
  281. {
  282. }
  283. static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
  284. {
  285. }
  286. #endif
  287. #endif /* _LINUX_KMSAN_H */