/* mem_detect.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/errno.h>
  3. #include <linux/init.h>
  4. #include <asm/setup.h>
  5. #include <asm/processor.h>
  6. #include <asm/sclp.h>
  7. #include <asm/sections.h>
  8. #include <asm/mem_detect.h>
  9. #include <asm/sparsemem.h>
  10. #include "decompressor.h"
  11. #include "boot.h"
/*
 * Boot-time memory detection results; __bootdata places this in a
 * section handed over to the decompressed kernel.
 */
struct mem_detect_info __bootdata(mem_detect);
/* up to 256 storage elements, 1020 subincrements each */
/*
 * NOTE(review): the 1020/2 presumably reflects that adjacent
 * subincrements merge, so at most half of them yield distinct
 * mem_detect_block entries — confirm against add_mem_detect_block.
 */
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
  16. static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
  17. {
  18. if (n < MEM_INLINED_ENTRIES)
  19. return &mem_detect.entries[n];
  20. return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
  21. }
  22. /*
  23. * sequential calls to add_mem_detect_block with adjacent memory areas
  24. * are merged together into single memory block.
  25. */
  26. void add_mem_detect_block(u64 start, u64 end)
  27. {
  28. struct mem_detect_block *block;
  29. if (mem_detect.count) {
  30. block = __get_mem_detect_block_ptr(mem_detect.count - 1);
  31. if (block->end == start) {
  32. block->end = end;
  33. return;
  34. }
  35. }
  36. block = __get_mem_detect_block_ptr(mem_detect.count);
  37. block->start = start;
  38. block->end = end;
  39. mem_detect.count++;
  40. }
/*
 * Issue diagnose 0x260 (storage configuration query) with a temporary
 * program check handler installed, so a machine that does not support
 * the diagnose fails gracefully instead of taking a fatal program check.
 *
 * @rx1/@rx2: even/odd halves of the register pair passed to the diag
 *            (presumably response buffer address and length — see the
 *            caller diag260()).
 *
 * Returns the value left in ry by the diagnose when it completed with
 * condition code 0 (the caller treats this as an extent count), or -1
 * on failure / program check.
 */
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	psw_t old;
	int rc;

	rx.even = rx1;
	rx.odd = rx2;
	ry = 0x10; /* storage configuration */
	rc = -1; /* fail */
	asm volatile(
		/* save the 16-byte program check new PSW, then redirect it to 1f */
		" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
		" epsw %[reg1],%[reg2]\n"
		" st %[reg1],0(%[psw_pgm])\n"
		" st %[reg2],4(%[psw_pgm])\n"
		" larl %[reg1],1f\n"
		" stg %[reg1],8(%[psw_pgm])\n"
		/* on a program check the diag is skipped and rc stays -1 */
		" diag %[rx],%[ry],0x260\n"
		" ipm %[rc]\n"
		" srl %[rc],28\n"
		/* restore the original program check new PSW */
		"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [ry] "+&d" (ry),
		  /* read-write: the first mvc reads the old PSW contents */
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? ry : -1;
}
  74. static int diag260(void)
  75. {
  76. int rc, i;
  77. struct {
  78. unsigned long start;
  79. unsigned long end;
  80. } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
  81. memset(storage_extents, 0, sizeof(storage_extents));
  82. rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
  83. if (rc == -1)
  84. return -1;
  85. for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
  86. add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
  87. return 0;
  88. }
  89. static int tprot(unsigned long addr)
  90. {
  91. unsigned long reg1, reg2;
  92. int rc = -EFAULT;
  93. psw_t old;
  94. asm volatile(
  95. " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
  96. " epsw %[reg1],%[reg2]\n"
  97. " st %[reg1],0(%[psw_pgm])\n"
  98. " st %[reg2],4(%[psw_pgm])\n"
  99. " larl %[reg1],1f\n"
  100. " stg %[reg1],8(%[psw_pgm])\n"
  101. " tprot 0(%[addr]),0\n"
  102. " ipm %[rc]\n"
  103. " srl %[rc],28\n"
  104. "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
  105. : [reg1] "=&d" (reg1),
  106. [reg2] "=&a" (reg2),
  107. [rc] "+&d" (rc),
  108. "=Q" (S390_lowcore.program_new_psw.addr),
  109. "=Q" (old)
  110. : [psw_old] "a" (&old),
  111. [psw_pgm] "a" (&S390_lowcore.program_new_psw),
  112. [addr] "a" (addr)
  113. : "cc", "memory");
  114. return rc;
  115. }
  116. static unsigned long search_mem_end(void)
  117. {
  118. unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
  119. unsigned long offset = 0;
  120. unsigned long pivot;
  121. while (range > 1) {
  122. range >>= 1;
  123. pivot = offset + range;
  124. if (!tprot(pivot << 20))
  125. offset = pivot;
  126. }
  127. return (offset + 1) << 20;
  128. }
  129. unsigned long detect_memory(unsigned long *safe_addr)
  130. {
  131. unsigned long max_physmem_end = 0;
  132. sclp_early_get_memsize(&max_physmem_end);
  133. mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
  134. if (!sclp_early_read_storage_info()) {
  135. mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
  136. } else if (!diag260()) {
  137. mem_detect.info_source = MEM_DETECT_DIAG260;
  138. max_physmem_end = max_physmem_end ?: get_mem_detect_end();
  139. } else if (max_physmem_end) {
  140. add_mem_detect_block(0, max_physmem_end);
  141. mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
  142. } else {
  143. max_physmem_end = search_mem_end();
  144. add_mem_detect_block(0, max_physmem_end);
  145. mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
  146. }
  147. if (mem_detect.count > MEM_INLINED_ENTRIES) {
  148. *safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
  149. sizeof(struct mem_detect_block);
  150. }
  151. return max_physmem_end;
  152. }