1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * ld script for the x86 kernel 4 * 5 * Historic 32-bit version written by Martin M< 6 * 7 * Modernisation, unification and other change 8 * Copyright (C) 2007-2009 Sam Ravnborg <sam 9 * 10 * 11 * Don't define absolute symbols until and unl 12 * value is should remain constant even if ker 13 * at run time. Absolute symbols are not reloc 14 * change if kernel is relocated, make the sym 15 * put it inside the section definition. 16 */ 17 18 #define LOAD_OFFSET __START_KERNEL_map 19 20 #define RUNTIME_DISCARD_EXIT 21 #define EMITS_PT_NOTE 22 #define RO_EXCEPTION_TABLE_ALIGN 16 23 24 #include <asm-generic/vmlinux.lds.h> 25 #include <asm/asm-offsets.h> 26 #include <asm/thread_info.h> 27 #include <asm/page_types.h> 28 #include <asm/orc_lookup.h> 29 #include <asm/cache.h> 30 #include <asm/boot.h> 31 32 #undef i386 /* in case the preprocessor is 33 34 OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) 35 36 #ifdef CONFIG_X86_32 37 OUTPUT_ARCH(i386) 38 ENTRY(phys_startup_32) 39 #else 40 OUTPUT_ARCH(i386:x86-64) 41 ENTRY(phys_startup_64) 42 #endif 43 44 jiffies = jiffies_64; 45 const_pcpu_hot = pcpu_hot; 46 47 #if defined(CONFIG_X86_64) 48 /* 49 * On 64-bit, align RODATA to 2MB so we retain 50 * boundaries spanning kernel text, rodata and 51 * 52 * However, kernel identity mappings will have 53 * to the pages mapping to text and to the pag 54 * text section. Hence kernel identity mapping 55 * pages. For 64-bit, kernel text and kernel i 56 * so we can enable protection checks as well 57 * mappings for kernel text. 58 */ 59 #define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAG 60 61 #define X86_ALIGN_RODATA_END 62 . = ALIGN(HPAGE_SIZE); 63 __end_rodata_hpage_align = .; 64 __end_rodata_aligned = .; 65 66 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_ 67 #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_ 68 69 /* 70 * This section contains data which will be ma 71 * encryption operates on a page basis. 
Make t 72 * to avoid splitting the pages while mapping 73 * 74 * Note: We use a separate section so that onl 75 * decrypted to avoid exposing more than we wi 76 */ 77 #define BSS_DECRYPTED 78 . = ALIGN(PMD_SIZE); 79 __start_bss_decrypted = .; 80 *(.bss..decrypted); 81 . = ALIGN(PAGE_SIZE); 82 __start_bss_decrypted_unused = .; 83 . = ALIGN(PMD_SIZE); 84 __end_bss_decrypted = .; 85 86 #else 87 88 #define X86_ALIGN_RODATA_BEGIN 89 #define X86_ALIGN_RODATA_END 90 . = ALIGN(PAGE_SIZE); 91 __end_rodata_aligned = .; 92 93 #define ALIGN_ENTRY_TEXT_BEGIN 94 #define ALIGN_ENTRY_TEXT_END 95 #define BSS_DECRYPTED 96 97 #endif 98 99 PHDRS { 100 text PT_LOAD FLAGS(5); /* R_E 101 data PT_LOAD FLAGS(6); /* RW_ 102 #ifdef CONFIG_X86_64 103 #ifdef CONFIG_SMP 104 percpu PT_LOAD FLAGS(6); /* RW_ 105 #endif 106 init PT_LOAD FLAGS(7); /* RWE 107 #endif 108 note PT_NOTE FLAGS(0); /* ___ 109 } 110 111 SECTIONS 112 { 113 . = __START_KERNEL; 114 #ifdef CONFIG_X86_32 115 phys_startup_32 = ABSOLUTE(startup_32 116 #else 117 phys_startup_64 = ABSOLUTE(startup_64 118 #endif 119 120 /* Text and read-only data */ 121 .text : AT(ADDR(.text) - LOAD_OFFSET) 122 _text = .; 123 _stext = .; 124 /* bootstrapping code */ 125 HEAD_TEXT 126 TEXT_TEXT 127 SCHED_TEXT 128 LOCK_TEXT 129 KPROBES_TEXT 130 SOFTIRQENTRY_TEXT 131 #ifdef CONFIG_MITIGATION_RETPOLINE 132 *(.text..__x86.indirect_thunk) 133 *(.text..__x86.return_thunk) 134 #endif 135 STATIC_CALL_TEXT 136 137 ALIGN_ENTRY_TEXT_BEGIN 138 *(.text..__x86.rethunk_untrain 139 ENTRY_TEXT 140 141 #ifdef CONFIG_MITIGATION_SRSO 142 /* 143 * See the comment above srso_ 144 * definition. 145 */ 146 . = srso_alias_untrain_ret | ( 147 *(.text..__x86.rethunk_safe) 148 #endif 149 ALIGN_ENTRY_TEXT_END 150 *(.gnu.warning) 151 152 } :text = 0xcccccccc 153 154 /* End of text section, which should o 155 _etext = .; 156 . 
= ALIGN(PAGE_SIZE); 157 158 X86_ALIGN_RODATA_BEGIN 159 RO_DATA(PAGE_SIZE) 160 X86_ALIGN_RODATA_END 161 162 /* Data */ 163 .data : AT(ADDR(.data) - LOAD_OFFSET) 164 /* Start of data section */ 165 _sdata = .; 166 167 /* init_task */ 168 INIT_TASK_DATA(THREAD_SIZE) 169 170 /* equivalent to task_pt_regs( 171 __top_init_kernel_stack = __en 172 173 #ifdef CONFIG_X86_32 174 /* 32 bit has nosave before _e 175 NOSAVE_DATA 176 #endif 177 178 PAGE_ALIGNED_DATA(PAGE_SIZE) 179 180 CACHELINE_ALIGNED_DATA(L1_CACH 181 182 DATA_DATA 183 CONSTRUCTORS 184 185 /* rarely changed data like cp 186 READ_MOSTLY_DATA(INTERNODE_CAC 187 188 /* End of data section */ 189 _edata = .; 190 } :data 191 192 BUG_TABLE 193 194 ORC_UNWIND_TABLE 195 196 . = ALIGN(PAGE_SIZE); 197 __vvar_page = .; 198 199 .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) 200 /* work around gold bug 13023 201 __vvar_beginning_hack = .; 202 203 /* Place all vvars at the offs 204 #define EMIT_VVAR(name, offset) 205 . = __vvar_beginning_hack + of 206 *(.vvar_ ## name) 207 #include <asm/vvar.h> 208 #undef EMIT_VVAR 209 210 /* 211 * Pad the rest of the page wi 212 * can leave garbage here. 213 */ 214 . = __vvar_beginning_hack + PA 215 } :data 216 217 . = ALIGN(__vvar_page + PAGE_SIZE, PAG 218 219 /* Init code and data - will be freed 220 . = ALIGN(PAGE_SIZE); 221 .init.begin : AT(ADDR(.init.begin) - L 222 __init_begin = .; /* paired wi 223 } 224 225 #if defined(CONFIG_X86_64) && defined(CONFIG_S 226 /* 227 * percpu offsets are zero-based on SM 228 * output PHDR, so the next output sec 229 * start another segment - init. 
230 */ 231 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, 232 ASSERT(SIZEOF(.data..percpu) < CONFIG_ 233 "per-CPU data too large - incre 234 #endif 235 236 INIT_TEXT_SECTION(PAGE_SIZE) 237 #ifdef CONFIG_X86_64 238 :init 239 #endif 240 241 /* 242 * Section for code used exclusively b 243 * references to such code must be pat 244 * by using X86_FEATURE_ALWAYS CPU fea 245 * 246 * See static_cpu_has() for an example 247 */ 248 .altinstr_aux : AT(ADDR(.altinstr_aux) 249 *(.altinstr_aux) 250 } 251 252 INIT_DATA_SECTION(16) 253 254 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_d 255 __x86_cpu_dev_start = .; 256 *(.x86_cpu_dev.init) 257 __x86_cpu_dev_end = .; 258 } 259 260 #ifdef CONFIG_X86_INTEL_MID 261 .x86_intel_mid_dev.init : AT(ADDR(.x86 262 263 __x86_intel_mid_dev_start = .; 264 *(.x86_intel_mid_dev.init) 265 __x86_intel_mid_dev_end = .; 266 } 267 #endif 268 269 #ifdef CONFIG_MITIGATION_RETPOLINE 270 /* 271 * List of instructions that call/jmp/ 272 * __x86_indirect_thunk_*(). These ins 273 * with alternatives, after which the 274 */ 275 . = ALIGN(8); 276 .retpoline_sites : AT(ADDR(.retpoline_ 277 __retpoline_sites = .; 278 *(.retpoline_sites) 279 __retpoline_sites_end = .; 280 } 281 282 . = ALIGN(8); 283 .return_sites : AT(ADDR(.return_sites) 284 __return_sites = .; 285 *(.return_sites) 286 __return_sites_end = .; 287 } 288 289 . = ALIGN(8); 290 .call_sites : AT(ADDR(.call_sites) - L 291 __call_sites = .; 292 *(.call_sites) 293 __call_sites_end = .; 294 } 295 #endif 296 297 #ifdef CONFIG_X86_KERNEL_IBT 298 . = ALIGN(8); 299 .ibt_endbr_seal : AT(ADDR(.ibt_endbr_s 300 __ibt_endbr_seal = .; 301 *(.ibt_endbr_seal) 302 __ibt_endbr_seal_end = .; 303 } 304 #endif 305 306 #ifdef CONFIG_FINEIBT 307 . = ALIGN(8); 308 .cfi_sites : AT(ADDR(.cfi_sites) - LOA 309 __cfi_sites = .; 310 *(.cfi_sites) 311 __cfi_sites_end = .; 312 } 313 #endif 314 315 /* 316 * struct alt_inst entries. From the h 317 * "Alternative instructions for diffe 318 * Think locking instructions on spinl 319 */ 320 . 
= ALIGN(8); 321 .altinstructions : AT(ADDR(.altinstruc 322 __alt_instructions = .; 323 *(.altinstructions) 324 __alt_instructions_end = .; 325 } 326 327 /* 328 * And here are the replacement instru 329 * them as binary blobs. The .altinstr 330 * get the address and the length of t 331 */ 332 .altinstr_replacement : AT(ADDR(.altin 333 *(.altinstr_replacement) 334 } 335 336 . = ALIGN(8); 337 .apicdrivers : AT(ADDR(.apicdrivers) - 338 __apicdrivers = .; 339 *(.apicdrivers); 340 __apicdrivers_end = .; 341 } 342 343 . = ALIGN(8); 344 /* 345 * .exit.text is discarded at runtime, 346 * references from .altinstructions 347 */ 348 .exit.text : AT(ADDR(.exit.text) - LOA 349 EXIT_TEXT 350 } 351 352 .exit.data : AT(ADDR(.exit.data) - LOA 353 EXIT_DATA 354 } 355 356 #if !defined(CONFIG_X86_64) || !defined(CONFIG 357 PERCPU_SECTION(INTERNODE_CACHE_BYTES) 358 #endif 359 360 RUNTIME_CONST_VARIABLES 361 RUNTIME_CONST(ptr, USER_PTR_MAX) 362 363 . = ALIGN(PAGE_SIZE); 364 365 /* freed after init ends here */ 366 .init.end : AT(ADDR(.init.end) - LOAD_ 367 __init_end = .; 368 } 369 370 /* 371 * smp_locks might be freed after init 372 * start/end must be page aligned 373 */ 374 . = ALIGN(PAGE_SIZE); 375 .smp_locks : AT(ADDR(.smp_locks) - LOA 376 __smp_locks = .; 377 *(.smp_locks) 378 . = ALIGN(PAGE_SIZE); 379 __smp_locks_end = .; 380 } 381 382 #ifdef CONFIG_X86_64 383 .data_nosave : AT(ADDR(.data_nosave) - 384 NOSAVE_DATA 385 } 386 #endif 387 388 /* BSS */ 389 . = ALIGN(PAGE_SIZE); 390 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { 391 __bss_start = .; 392 *(.bss..page_aligned) 393 . = ALIGN(PAGE_SIZE); 394 *(BSS_MAIN) 395 BSS_DECRYPTED 396 . = ALIGN(PAGE_SIZE); 397 __bss_stop = .; 398 } 399 400 /* 401 * The memory occupied from _text to h 402 * automatically reserved in setup_arc 403 * explicitly reserved using memblock_ 404 * and treated as available memory. 405 */ 406 __end_of_kernel_reserve = .; 407 408 . = ALIGN(PAGE_SIZE); 409 .brk : AT(ADDR(.brk) - LOAD_OFFSET) { 410 __brk_base = .; 411 . 
+= 64 * 1024; /* 64k 412 *(.bss..brk) /* are 413 __brk_limit = .; 414 } 415 416 . = ALIGN(PAGE_SIZE); /* kee 417 _end = .; 418 419 #ifdef CONFIG_AMD_MEM_ENCRYPT 420 /* 421 * Early scratch/workarea section: Liv 422 * (_text - _end). 423 * 424 * Resides after _end because even tho 425 * __end_of_kernel_reserve, the .brk s 426 * part of the kernel. Since it is loc 427 * it will be discarded and become par 428 * such, it can only be used by very e 429 * needed afterwards. 430 * 431 * Currently used by SME for performin 432 * kernel during boot. Resides on a 2M 433 * pagetable setup used for SME in-pla 434 */ 435 . = ALIGN(HPAGE_SIZE); 436 .init.scratch : AT(ADDR(.init.scratch) 437 __init_scratch_begin = .; 438 *(.init.scratch) 439 . = ALIGN(HPAGE_SIZE); 440 __init_scratch_end = .; 441 } 442 #endif 443 444 STABS_DEBUG 445 DWARF_DEBUG 446 ELF_DETAILS 447 448 DISCARDS 449 450 /* 451 * Make sure that the .got.plt is eith 452 * contains only the lazy dispatch ent 453 */ 454 .got.plt (INFO) : { *(.got.plt) } 455 ASSERT(SIZEOF(.got.plt) == 0 || 456 #ifdef CONFIG_X86_64 457 SIZEOF(.got.plt) == 0x18, 458 #else 459 SIZEOF(.got.plt) == 0xc, 460 #endif 461 "Unexpected GOT/PLT entries det 462 463 /* 464 * Sections that should stay zero size 465 * explicitly check instead of blindly 466 */ 467 .got : { 468 *(.got) *(.igot.*) 469 } 470 ASSERT(SIZEOF(.got) == 0, "Unexpected 471 472 .plt : { 473 *(.plt) *(.plt.*) *(.iplt) 474 } 475 ASSERT(SIZEOF(.plt) == 0, "Unexpected 476 477 .rel.dyn : { 478 *(.rel.*) *(.rel_*) 479 } 480 ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpec 481 482 .rela.dyn : { 483 *(.rela.*) *(.rela_*) 484 } 485 ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpe 486 } 487 488 /* 489 * The ASSERT() sink to . is intentional, for 490 */ 491 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE 492 "kernel image bigger than KERNEL_IM 493 494 #ifdef CONFIG_X86_64 495 /* 496 * Per-cpu symbols which need to be offset fro 497 * for the boot processor. 
498 */ 499 #define INIT_PER_CPU(x) init_per_cpu__##x = AB 500 INIT_PER_CPU(gdt_page); 501 INIT_PER_CPU(fixed_percpu_data); 502 INIT_PER_CPU(irq_stack_backing_store); 503 504 #ifdef CONFIG_SMP 505 . = ASSERT((fixed_percpu_data == 0), 506 "fixed_percpu_data is not at start 507 #endif 508 509 #ifdef CONFIG_MITIGATION_UNRET_ENTRY 510 . = ASSERT((retbleed_return_thunk & 0x3f) == 0 511 #endif 512 513 #ifdef CONFIG_MITIGATION_SRSO 514 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_ 515 /* 516 * GNU ld cannot do XOR until 2.41. 517 * https://sourceware.org/git/?p=binutils-gdb. 518 * 519 * LLVM lld cannot do XOR until lld-17. 520 * https://github.com/llvm/llvm-project/commit 521 * 522 * Instead do: (A | B) - (A & B) in order to c 523 * of the two function addresses: 524 */ 525 . = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) 526 (ABSOLUTE(srso_alias_untrain_r 527 "SRSO function pair won't alia 528 #endif 529 530 #endif /* CONFIG_X86_64 */
/*
 * NOTE(review): The text above appears to be a width-truncated dump of the
 * x86 kernel linker script, arch/x86/kernel/vmlinux.lds.S (presumably a
 * ~v6.11-v6.12 tree: the visible symbols `const_pcpu_hot`, `RUNTIME_CONST`,
 * `__top_init_kernel_stack`, `fixed_percpu_data` and the `.vvar`/EMIT_VVAR
 * block all coexist only around those releases -- TODO confirm the exact
 * version). The original file's line numbers have been fused into the text
 * ("121 .text : AT(ADDR(.text) - LOAD_OFFSET)") and most lines are cut off
 * mid-token (e.g. "ASSERT(SIZEOF(.data..percpu) < CONFIG_" at original line
 * 232, ". = srso_alias_untrain_ret | (" at 146), so this chunk is NOT a
 * valid linker script and will not assemble or link as-is. Do not attempt
 * to repair it line by line -- restore the file verbatim from the matching
 * upstream kernel tree and diff against it instead.
 */
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.