kaslr.c 6.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2019
  4. */
  5. #include <linux/pgtable.h>
  6. #include <asm/mem_detect.h>
  7. #include <asm/cpacf.h>
  8. #include <asm/timex.h>
  9. #include <asm/sclp.h>
  10. #include <asm/kasan.h>
  11. #include "decompressor.h"
  12. #include "boot.h"
/* Random source selected by check_prng(), strongest facility wins */
#define PRNG_MODE_TDES 1	/* CPACF KMC-PRNG (triple-DES based) fallback */
#define PRNG_MODE_SHA512 2	/* CPACF PRNO SHA512 DRNG */
#define PRNG_MODE_TRNG 3	/* CPACF PRNO true random number generator */
/*
 * Parameter block for the CPACF PRNO SHA512-DRNG functions; passed to
 * cpacf_prno() for both the SEED and GEN operations in get_random().
 * Field layout presumably follows the CPACF instruction specification —
 * only the overall block is touched in this file, never individual fields.
 */
struct prno_parm {
u32 res;		/* NOTE(review): presumably reserved — confirm against spec */
u32 reseed_counter;
u64 stream_bytes;
u8 V[112];		/* NOTE(review): presumably internal DRNG state — confirm */
u8 C[112];
};
/*
 * Parameter block for the CPACF KMC-PRNG function (TDES mode).
 * Only parm_block is read/written in this file (seeded in get_random());
 * reseed_counter and byte_counter are not used here.
 */
struct prng_parm {
u8 parm_block[32];	/* passed to cpacf_kmc() as the parameter block */
u32 reseed_counter;
u64 byte_counter;
};
  28. static int check_prng(void)
  29. {
  30. if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
  31. sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
  32. return 0;
  33. }
  34. if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
  35. return PRNG_MODE_TRNG;
  36. if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
  37. return PRNG_MODE_SHA512;
  38. else
  39. return PRNG_MODE_TDES;
  40. }
/*
 * Generate a random number in [0, limit - 1] and store it in *value.
 *
 * The source is chosen by check_prng(): the TRNG directly, the SHA512
 * DRNG seeded with the TOD clock, or — as last resort — the TDES
 * KMC-PRNG bootstrapped from a fixed libica parameter block XORed with
 * the TOD clock.
 *
 * Returns 0 on success, -1 when no PRNG facility is available.
 *
 * NOTE(review): "random % limit" has a slight modulo bias when limit
 * does not divide the value range evenly; presumably acceptable here.
 */
static int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		/* true random number generator: no seeding required */
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		/* seed the DRNG with the TOD clock, then draw one value */
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		/* stir the parameter block: feed the cipher output back 16x */
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		/* encrypt the seed in place to obtain the random value */
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		/* check_prng() returned 0: no usable facility */
		return -1;
	}
	*value = random % limit;
	return 0;
}
  87. /*
  88. * To randomize kernel base address we have to consider several facts:
  89. * 1. physical online memory might not be contiguous and may have holes. mem_detect
  90. * info contains list of online memory ranges we should consider.
  91. * 2. we have several memory regions which are occupied and we should not
  92. * overlap and destroy them. Currently safe_addr tells us the border below
  93. * which all those occupied regions are. We are safe to use anything above
  94. * safe_addr.
  95. * 3. the upper limit might apply as well, even if memory above that limit is
  96. * online. Currently those limitations are:
  97. * 3.1. Limit set by "mem=" kernel command line option
  98. * 3.2. memory reserved at the end for kasan initialization.
  99. * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size).
  100. * Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages
  101. * (16 pages when the kernel is built with kasan enabled)
  102. * Assumptions:
  103. * 1. kernel size (including .bss size) and upper memory limit are page aligned.
  104. * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
  105. * aligned (in practice memory configurations granularity on z/VM and LPAR
  106. * is 1mb).
  107. *
  108. * To guarantee uniform distribution of kernel base address among all suitable
  109. * addresses we generate random value just once. For that we need to build a
  110. * continuous range in which every value would be suitable. We can build this
  111. * range by simply counting all suitable addresses (let's call them positions)
  112. * which would be valid as kernel base address. To count positions we iterate
  113. * over online memory ranges. For each range which is big enough for the
  114. * kernel image we count all suitable addresses we can put the kernel image at
  115. * that is
  116. * (end - start - kernel_size) / THREAD_SIZE + 1
  117. * Two functions count_valid_kernel_positions and position_to_address help
  118. * to count positions in memory range given and then convert position back
  119. * to address.
  120. */
  121. static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
  122. unsigned long _min,
  123. unsigned long _max)
  124. {
  125. unsigned long start, end, pos = 0;
  126. int i;
  127. for_each_mem_detect_block(i, &start, &end) {
  128. if (_min >= end)
  129. continue;
  130. if (start >= _max)
  131. break;
  132. start = max(_min, start);
  133. end = min(_max, end);
  134. if (end - start < kernel_size)
  135. continue;
  136. pos += (end - start - kernel_size) / THREAD_SIZE + 1;
  137. }
  138. return pos;
  139. }
  140. static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
  141. unsigned long _min, unsigned long _max)
  142. {
  143. unsigned long start, end;
  144. int i;
  145. for_each_mem_detect_block(i, &start, &end) {
  146. if (_min >= end)
  147. continue;
  148. if (start >= _max)
  149. break;
  150. start = max(_min, start);
  151. end = min(_max, end);
  152. if (end - start < kernel_size)
  153. continue;
  154. if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
  155. return start + (pos - 1) * THREAD_SIZE;
  156. pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
  157. }
  158. return 0;
  159. }
  160. unsigned long get_random_base(unsigned long safe_addr)
  161. {
  162. unsigned long memory_limit = get_mem_detect_end();
  163. unsigned long base_pos, max_pos, kernel_size;
  164. int i;
  165. memory_limit = min(memory_limit, ident_map_size);
  166. /*
  167. * Avoid putting kernel in the end of physical memory
  168. * which kasan will use for shadow memory and early pgtable
  169. * mapping allocations.
  170. */
  171. memory_limit -= kasan_estimate_memory_needs(memory_limit);
  172. safe_addr = ALIGN(safe_addr, THREAD_SIZE);
  173. kernel_size = vmlinux.image_size + vmlinux.bss_size;
  174. if (safe_addr + kernel_size > memory_limit)
  175. return 0;
  176. max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
  177. if (!max_pos) {
  178. sclp_early_printk("KASLR disabled: not enough memory\n");
  179. return 0;
  180. }
  181. /* we need a value in the range [1, base_pos] inclusive */
  182. if (get_random(max_pos, &base_pos))
  183. return 0;
  184. return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
  185. }