random.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>

#include <uapi/linux/random.h>

struct notifier_block;

void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
                          unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);

static inline void add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
        add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
        add_device_randomness(NULL, 0);
#endif
}
#if IS_ENABLED(CONFIG_VMGENID)
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
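/*
 * Illustrative sketch (editorial note, not part of this header): a consumer
 * that must discard cached randomness when a VM is forked or cloned could
 * register a notifier like the hypothetical one below; the callback name
 * and the invalidation helper are made up for illustration.
 *
 *      static int my_vmfork_cb(struct notifier_block *nb,
 *                              unsigned long action, void *data)
 *      {
 *              invalidate_my_cached_keys();    <- hypothetical helper
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_vmfork_nb = {
 *              .notifier_call = my_vmfork_cb,
 *      };
 *
 *      err = register_random_vmfork_notifier(&my_vmfork_nb);
 */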
void get_random_bytes(void *buf, size_t len);
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
        return get_random_u64();
#else
        return get_random_u32();
#endif
}
u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
        if (!__builtin_constant_p(ceil))
                return __get_random_u32_below(ceil);

        /*
         * For the fast path, below, all operations on ceil are precomputed by
         * the compiler, so this incurs no overhead for checking pow2, doing
         * divisions, or branching based on integer size. The resultant
         * algorithm does traditional reciprocal multiplication (typically
         * optimized by the compiler into shifts and adds), rejecting samples
         * whose lower half would indicate a range indivisible by ceil.
         */
        BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
        if (ceil <= 1)
                return 0;
        for (;;) {
                if (ceil <= 1U << 8) {
                        u32 mult = ceil * get_random_u8();
                        if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
                                return mult >> 8;
                } else if (ceil <= 1U << 16) {
                        u32 mult = ceil * get_random_u16();
                        if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
                                return mult >> 16;
                } else {
                        u64 mult = (u64)ceil * get_random_u32();
                        if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
                                return mult >> 32;
                }
        }
}
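/*
 * Worked example (editorial note, not in the original source): for
 * ceil == 10, a random u8 r is scaled to mult = 10 * r, which lies in
 * [0, 2550], and mult >> 8 selects one of ten buckets. Since
 * 256 % 10 == 6, the buckets cannot all be the same size; the check
 * (u8)mult >= 256 % 10 rejects exactly the 6 out of 256 r values that
 * would make some buckets overfull, leaving 250 accepted inputs, i.e.
 * exactly 25 per bucket, so every output in [0, 10) is equally likely.
 * This is the multiply-and-reject bounded sampling technique often
 * attributed to Lemire.
 */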
/*
 * Returns a random integer in the interval (floor, U32_MAX], with uniform
 * distribution, suitable for all uses. Fastest when floor is a constant, but
 * still fast for variable floor as well.
 */
static inline u32 get_random_u32_above(u32 floor)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
                         "get_random_u32_above() must take floor < U32_MAX");
        return floor + 1 + get_random_u32_below(U32_MAX - floor);
}
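/*
 * Editorial note: get_random_u32_below(U32_MAX - floor) yields a value in
 * [0, U32_MAX - floor), so after adding floor + 1 the result lies in
 * [floor + 1, U32_MAX], which is exactly the open-below interval
 * (floor, U32_MAX]. For example, get_random_u32_above(0) returns a value
 * in [1, U32_MAX].
 */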
/*
 * Returns a random integer in the interval [floor, ceil], with uniform
 * distribution, suitable for all uses. Fastest when floor and ceil are
 * constant, but still fast for variable floor and ceil as well.
 */
static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
                         (floor > ceil || ceil - floor == U32_MAX),
                         "get_random_u32_inclusive() must take floor <= ceil");
        return floor + get_random_u32_below(ceil - floor + 1);
}
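/*
 * Usage sketch (editorial note, not in the original source): a fair die
 * roll would be get_random_u32_inclusive(1, 6), which maps a sample from
 * get_random_u32_below(6) onto [1, 6].
 */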
/*
 * On 64-bit architectures, protect against non-terminated C string overflows
 * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
 */
#ifdef CONFIG_64BIT
# ifdef __LITTLE_ENDIAN
#  define CANARY_MASK 0xffffffffffffff00UL
# else /* big endian, 64 bits: */
#  define CANARY_MASK 0x00ffffffffffffffUL
# endif
#else /* 32 bits: */
# define CANARY_MASK 0xffffffffUL
#endif

static inline unsigned long get_random_canary(void)
{
        return get_random_long() & CANARY_MASK;
}
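/*
 * Editorial note: on little endian the mask clears the least significant
 * byte and on big endian the most significant byte, so in both cases the
 * byte at the canary's lowest address in memory is zero. An overflowing
 * C string copy therefore terminates on that NUL and cannot reproduce the
 * canary. Illustrative use, mirroring what a stack-protector check does
 * (the handler name is hypothetical):
 *
 *      unsigned long canary = get_random_canary();
 *      ...
 *      if (stack_slot != canary)
 *              handle_stack_corruption();      <- hypothetical handler
 */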
void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);

/*
 * Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
 * Returns the result of the call to wait_for_random_bytes.
 */
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
        int ret = wait_for_random_bytes();
        get_random_bytes(buf, nbytes);
        return ret;
}
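/*
 * Usage sketch (editorial note, not in the original source): generating a
 * key only once the RNG is known to be seeded; the buffer name is
 * hypothetical.
 *
 *      u8 session_key[32];
 *      int err = get_random_bytes_wait(session_key, sizeof(session_key));
 *      if (err)
 *              return err;     <- the wait was interrupted by a signal
 */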
#define declare_get_random_var_wait(name, ret_type) \
        static inline int get_random_ ## name ## _wait(ret_type *out) { \
                int ret = wait_for_random_bytes(); \
                if (unlikely(ret)) \
                        return ret; \
                *out = get_random_ ## name(); \
                return 0; \
        }
declare_get_random_var_wait(u8, u8)
declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u64)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
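/*
 * For reference (editorial note): declare_get_random_var_wait(u32, u32)
 * above expands to
 *
 *      static inline int get_random_u32_wait(u32 *out)
 *      {
 *              int ret = wait_for_random_bytes();
 *              if (unlikely(ret))
 *                      return ret;
 *              *out = get_random_u32();
 *              return 0;
 *      }
 */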
/*
 * This is designed to be standalone for just prandom
 * users, but for now we include it from <linux/random.h>
 * for legacy reasons.
 */
#include <linux/prandom.h>

#include <asm/archrandom.h>
/*
 * Called from the boot CPU during startup; not valid to call once
 * secondary CPUs are up and preemption is possible.
 */
#ifndef arch_get_random_seed_longs_early
static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
        WARN_ON(system_state != SYSTEM_BOOTING);
        return arch_get_random_seed_longs(v, max_longs);
}
#endif

#ifndef arch_get_random_longs_early
static inline size_t __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
{
        WARN_ON(system_state != SYSTEM_BOOTING);
        return arch_get_random_longs(v, max_longs);
}
#endif
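/*
 * Illustrative sketch (editorial note; a hypothetical caller, not the
 * actual code in drivers/char/random.c): draining the architectural seed
 * source during early boot might look like this.
 *
 *      unsigned long seeds[2];
 *      size_t n = arch_get_random_seed_longs_early(seeds, ARRAY_SIZE(seeds));
 *      if (n)
 *              add_device_randomness(seeds, n * sizeof(*seeds));
 */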
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
#endif

#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif

#endif /* _LINUX_RANDOM_H */