/* vmlinux.lds.S — ia64 kernel linker script */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #include <linux/pgtable.h>
  3. #include <asm/cache.h>
  4. #include <asm/ptrace.h>
  5. #include <asm/thread_info.h>
  6. #define EMITS_PT_NOTE
  7. #define RO_EXCEPTION_TABLE_ALIGN 16
  8. #include <asm-generic/vmlinux.lds.h>
  9. OUTPUT_FORMAT("elf64-ia64-little")
  10. OUTPUT_ARCH(ia64)
  11. ENTRY(phys_start)
  12. jiffies = jiffies_64;
  13. PHDRS {
  14. text PT_LOAD;
  15. percpu PT_LOAD;
  16. data PT_LOAD;
  17. note PT_NOTE;
  18. unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
  19. }
  20. SECTIONS {
  21. /*
  22. * unwind exit sections must be discarded before
  23. * the rest of the sections get included.
  24. */
  25. /DISCARD/ : {
  26. *(.IA_64.unwind.exit.text)
  27. *(.IA_64.unwind_info.exit.text)
  28. *(.comment)
  29. *(.note)
  30. }
  31. v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
  32. phys_start = _start - LOAD_OFFSET;
  33. code : {
  34. } :text
  35. . = KERNEL_START;
  36. _text = .;
  37. _stext = .;
  38. .text : AT(ADDR(.text) - LOAD_OFFSET) {
  39. __start_ivt_text = .;
  40. *(.text..ivt)
  41. __end_ivt_text = .;
  42. TEXT_TEXT
  43. SCHED_TEXT
  44. CPUIDLE_TEXT
  45. LOCK_TEXT
  46. KPROBES_TEXT
  47. IRQENTRY_TEXT
  48. SOFTIRQENTRY_TEXT
  49. *(.gnu.linkonce.t*)
  50. }
  51. .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
  52. *(.text2)
  53. }
  54. #ifdef CONFIG_SMP
  55. .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
  56. *(.text..lock)
  57. }
  58. #endif
  59. _etext = .;
  60. /*
  61. * Read-only data
  62. */
  63. /* MCA table */
  64. . = ALIGN(16);
  65. __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
  66. __start___mca_table = .;
  67. *(__mca_table)
  68. __stop___mca_table = .;
  69. }
  70. .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
  71. __start___phys_stack_reg_patchlist = .;
  72. *(.data..patch.phys_stack_reg)
  73. __end___phys_stack_reg_patchlist = .;
  74. }
  75. /*
  76. * Global data
  77. */
  78. _data = .;
  79. /* Unwind info & table: */
  80. . = ALIGN(8);
  81. .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
  82. *(.IA_64.unwind_info*)
  83. }
  84. .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
  85. __start_unwind = .;
  86. *(.IA_64.unwind*)
  87. __end_unwind = .;
  88. } :text :unwind
  89. code_continues2 : {
  90. } :text
  91. RO_DATA(4096)
  92. .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
  93. __start_opd = .;
  94. *(.opd)
  95. __end_opd = .;
  96. }
  97. /*
  98. * Initialization code and data:
  99. */
  100. . = ALIGN(PAGE_SIZE);
  101. __init_begin = .;
  102. INIT_TEXT_SECTION(PAGE_SIZE)
  103. INIT_DATA_SECTION(16)
  104. .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
  105. __start___vtop_patchlist = .;
  106. *(.data..patch.vtop)
  107. __end___vtop_patchlist = .;
  108. }
  109. .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
  110. __start___rse_patchlist = .;
  111. *(.data..patch.rse)
  112. __end___rse_patchlist = .;
  113. }
  114. .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
  115. __start___mckinley_e9_bundles = .;
  116. *(.data..patch.mckinley_e9)
  117. __end___mckinley_e9_bundles = .;
  118. }
  119. #ifdef CONFIG_SMP
  120. . = ALIGN(PERCPU_PAGE_SIZE);
  121. __cpu0_per_cpu = .;
  122. . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
  123. #endif
  124. . = ALIGN(PAGE_SIZE);
  125. __init_end = .;
  126. .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
  127. PAGE_ALIGNED_DATA(PAGE_SIZE)
  128. . = ALIGN(PAGE_SIZE);
  129. __start_gate_section = .;
  130. *(.data..gate)
  131. __stop_gate_section = .;
  132. }
  133. /*
  134. * make sure the gate page doesn't expose
  135. * kernel data
  136. */
  137. . = ALIGN(PAGE_SIZE);
  138. /* Per-cpu data: */
  139. . = ALIGN(PERCPU_PAGE_SIZE);
  140. PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
  141. __phys_per_cpu_start = __per_cpu_load;
  142. /*
  143. * ensure percpu data fits
  144. * into percpu page size
  145. */
  146. . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
  147. data : {
  148. } :data
  149. .data : AT(ADDR(.data) - LOAD_OFFSET) {
  150. _sdata = .;
  151. INIT_TASK_DATA(PAGE_SIZE)
  152. CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
  153. READ_MOSTLY_DATA(SMP_CACHE_BYTES)
  154. DATA_DATA
  155. *(.data1)
  156. *(.gnu.linkonce.d*)
  157. CONSTRUCTORS
  158. }
  159. BUG_TABLE
  160. . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
  161. .got : AT(ADDR(.got) - LOAD_OFFSET) {
  162. *(.got.plt)
  163. *(.got)
  164. }
  165. __gp = ADDR(.got) + 0x200000;
  166. /*
  167. * We want the small data sections together,
  168. * so single-instruction offsets can access
  169. * them all, and initialized data all before
  170. * uninitialized, so we can shorten the
  171. * on-disk segment size.
  172. */
  173. .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
  174. *(.sdata)
  175. *(.sdata1)
  176. *(.srdata)
  177. }
  178. _edata = .;
  179. BSS_SECTION(0, 0, 0)
  180. _end = .;
  181. code : {
  182. } :text
  183. STABS_DEBUG
  184. DWARF_DEBUG
  185. ELF_DETAILS
  186. /* Default discards */
  187. DISCARDS
  188. }