Linux/arch/x86/kernel/vmlinux.lds.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value
 * should change when the kernel is relocated, make the symbol
 * section-relative and put it inside the section definition.
 */
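
/*
 * Illustrative sketch, not part of the build: the distinction described
 * above, using hypothetical symbol names.
 *
 *      .data : { __my_marker = .; }            <-- section-relative; moves
 *                                                  with .data on relocation
 *      __my_fixed = ABSOLUTE(0x1000);          <-- absolute; keeps its value
 *                                                  even if the image moves
 */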

#define LOAD_OFFSET __START_KERNEL_map

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN        16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;
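
/*
 * x86 is little-endian, so aliasing jiffies onto jiffies_64 lets 32-bit
 * readers of jiffies see the low word of the 64-bit counter.
 */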
const_pcpu_hot = pcpu_hot;

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so that we retain large page mappings
 * across the boundaries between kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings use different RWX permissions for
 * the pages mapping text and for the (later freed) pages that pad the text
 * section, so the identity mappings are broken down into smaller pages.
 * On 64-bit, kernel text and the kernel identity mappings are separate, so
 * we can enable protection checks and still retain 2MB large page mappings
 * for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN  . = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END                                    \
                . = ALIGN(HPAGE_SIZE);                          \
                __end_rodata_hpage_align = .;                   \
                __end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN  . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END    . = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted, to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED                                           \
        . = ALIGN(PMD_SIZE);                                    \
        __start_bss_decrypted = .;                              \
        *(.bss..decrypted);                                     \
        . = ALIGN(PAGE_SIZE);                                   \
        __start_bss_decrypted_unused = .;                       \
        . = ALIGN(PMD_SIZE);                                    \
        __end_bss_decrypted = .;                                \

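/*
 * (With 4K base pages on x86-64, PMD_SIZE is 2MB, matching HPAGE_SIZE, so
 * the PMD alignment above keeps the decrypted region on 2MB mapping
 * boundaries.)
 */
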
#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END                                    \
                . = ALIGN(PAGE_SIZE);                           \
                __end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
        init PT_LOAD FLAGS(7);          /* RWE */
#endif
        note PT_NOTE FLAGS(0);          /* ___ */
}
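
/*
 * The FLAGS() values above are the ELF p_flags bits: PF_X = 1, PF_W = 2,
 * PF_R = 4. Hence 5 = R_E, 6 = RW_ and 7 = RWE, matching the comments.
 */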

SECTIONS
{
        . = __START_KERNEL;
#ifdef CONFIG_X86_32
        phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
        phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

        /* Text and read-only data */
        .text :  AT(ADDR(.text) - LOAD_OFFSET) {
                _text = .;
                _stext = .;
                /* bootstrapping code */
                HEAD_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                SOFTIRQENTRY_TEXT
#ifdef CONFIG_MITIGATION_RETPOLINE
                *(.text..__x86.indirect_thunk)
                *(.text..__x86.return_thunk)
#endif
                STATIC_CALL_TEXT

                ALIGN_ENTRY_TEXT_BEGIN
                *(.text..__x86.rethunk_untrain)
                ENTRY_TEXT

#ifdef CONFIG_MITIGATION_SRSO
                /*
                 * See the comment above srso_alias_untrain_ret()'s
                 * definition.
                 */
                . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
                *(.text..__x86.rethunk_safe)
#endif
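
                /*
                 * Note on the SRSO placement above: ORing bits 2, 8, 14 and
                 * 20 into the location counter forces those bits to be set
                 * in the address at which srso_alias_safe_ret is placed, so
                 * its address is intended to differ from
                 * srso_alias_untrain_ret's in exactly those bits; the
                 * ASSERT at the end of this script verifies that the pair
                 * really aliases that way.
                 */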
                ALIGN_ENTRY_TEXT_END
                *(.gnu.warning)

        } :text = 0xcccccccc
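
        /*
         * The 0xcccccccc output-section fill above pads any gaps in the
         * text segment with 0xcc bytes, the x86 int3 instruction, so that
         * a stray jump into padding traps instead of executing junk.
         */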

        /* End of text section, which should occupy a whole number of pages */
        _etext = .;
        . = ALIGN(PAGE_SIZE);

        X86_ALIGN_RODATA_BEGIN
        RO_DATA(PAGE_SIZE)
        X86_ALIGN_RODATA_END

        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                /* Start of data section */
                _sdata = .;

                /* init_task */
                INIT_TASK_DATA(THREAD_SIZE)

                /* equivalent to task_pt_regs(&init_task) */
                __top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;

#ifdef CONFIG_X86_32
                /* 32-bit has nosave before _edata */
                NOSAVE_DATA
#endif

                PAGE_ALIGNED_DATA(PAGE_SIZE)

                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

                DATA_DATA
                CONSTRUCTORS

                /* rarely changed data like cpu maps */
                READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

                /* End of data section */
                _edata = .;
        } :data

        BUG_TABLE

        ORC_UNWIND_TABLE

        . = ALIGN(PAGE_SIZE);
        __vvar_page = .;

        .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
                /* work around gold bug 13023 */
                __vvar_beginning_hack = .;

                /* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)                         \
                . = __vvar_beginning_hack + offset;     \
                *(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR

                /*
                 * Pad the rest of the page with zeros.  Otherwise the loader
                 * can leave garbage here.
                 */
                . = __vvar_beginning_hack + PAGE_SIZE;
        } :data
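
        /*
         * The vvar page assembled above is mapped read-only into user
         * processes as part of the vDSO mapping, so that calls such as
         * clock_gettime() can read kernel-maintained variables without
         * entering the kernel.
         */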

        . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

        /* Init code and data - will be freed after init */
        . = ALIGN(PAGE_SIZE);
        .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
                __init_begin = .; /* paired with __init_end */
        }

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
        /*
         * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
         * output PHDR, so the next output section - .init.text - should
         * start another segment - init.
         */
        PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
        ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
               "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

        INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
        :init
#endif

        /*
         * Section for code used exclusively before alternatives are run. All
         * references to such code must be patched out by alternatives, normally
         * by using the X86_FEATURE_ALWAYS CPU feature bit.
         *
         * See static_cpu_has() for an example.
         */
        .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
                *(.altinstr_aux)
        }

        INIT_DATA_SECTION(16)

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }

#ifdef CONFIG_X86_INTEL_MID
        .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
                                                                LOAD_OFFSET) {
                __x86_intel_mid_dev_start = .;
                *(.x86_intel_mid_dev.init)
                __x86_intel_mid_dev_end = .;
        }
#endif

#ifdef CONFIG_MITIGATION_RETPOLINE
        /*
         * List of instructions that call/jmp/jcc to retpoline thunks
         * __x86_indirect_thunk_*(). These instructions can be patched along
         * with alternatives, after which the section can be freed.
         */
        . = ALIGN(8);
        .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
                __retpoline_sites = .;
                *(.retpoline_sites)
                __retpoline_sites_end = .;
        }

        . = ALIGN(8);
        .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
                __return_sites = .;
                *(.return_sites)
                __return_sites_end = .;
        }

        . = ALIGN(8);
        .call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
                __call_sites = .;
                *(.call_sites)
                __call_sites_end = .;
        }
#endif

#ifdef CONFIG_X86_KERNEL_IBT
        . = ALIGN(8);
        .ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
                __ibt_endbr_seal = .;
                *(.ibt_endbr_seal)
                __ibt_endbr_seal_end = .;
        }
#endif

#ifdef CONFIG_FINEIBT
        . = ALIGN(8);
        .cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
                __cfi_sites = .;
                *(.cfi_sites)
                __cfi_sites_end = .;
        }
#endif

        /*
         * struct alt_instr entries. From the header (alternative.h):
         * "Alternative instructions for different CPU types or capabilities"
         * Think locking instructions on spinlocks.
         */
        . = ALIGN(8);
        .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }

        /*
         * And here are the replacement instructions. The linker places them
         * in as binary blobs. The .altinstructions section carries enough
         * data (address and length) to patch the kernel safely.
         */
        .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
                *(.altinstr_replacement)
        }
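
        /*
         * Both of the sections above are consumed at boot by
         * apply_alternatives() in arch/x86/kernel/alternative.c, which
         * copies a replacement blob over its original instructions when the
         * CPU advertises the corresponding feature.
         */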

        . = ALIGN(8);
        .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
                __apicdrivers = .;
                *(.apicdrivers);
                __apicdrivers_end = .;
        }

        . = ALIGN(8);
        /*
         * .exit.text is discarded at runtime, not link time, to deal with
         * references from .altinstructions
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }

        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
        PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

        RUNTIME_CONST_VARIABLES
        RUNTIME_CONST(ptr, USER_PTR_MAX)

        . = ALIGN(PAGE_SIZE);

        /* freed after init ends here */
        .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
                __init_end = .;
        }

        /*
         * smp_locks might be freed after init
         * start/end must be page aligned
         */
        . = ALIGN(PAGE_SIZE);
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
                . = ALIGN(PAGE_SIZE);
                __smp_locks_end = .;
        }
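
        /*
         * .smp_locks records the addresses of LOCK prefixes in the kernel
         * image; on a uniprocessor boot these can be patched to NOPs (see
         * the alternatives_smp_*() helpers in arch/x86/kernel/alternative.c),
         * after which the list itself may be freed.
         */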

#ifdef CONFIG_X86_64
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                NOSAVE_DATA
        }
#endif

        /* BSS */
        . = ALIGN(PAGE_SIZE);
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss..page_aligned)
                . = ALIGN(PAGE_SIZE);
                *(BSS_MAIN)
                BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
                __bss_stop = .;
        }

        /*
         * The memory occupied from _text to here, __end_of_kernel_reserve, is
         * automatically reserved in setup_arch(). Anything after here must be
         * explicitly reserved using memblock_reserve() or it will be discarded
         * and treated as available memory.
         */
        __end_of_kernel_reserve = .;

        . = ALIGN(PAGE_SIZE);
        .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
                *(.bss..brk)            /* areas brk users have reserved */
                __brk_limit = .;
        }
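
        /*
         * For context: the .bss..brk input sections gathered above are
         * populated by the RESERVE_BRK() macro, and the reserved space is
         * handed out during early boot by extend_brk(); the extra 64k
         * absorbs alignment slop.
         */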

        . = ALIGN(PAGE_SIZE);           /* keep VO_INIT_SIZE page aligned */
        _end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
        /*
         * Early scratch/workarea section: Lives outside of the kernel proper
         * (_text - _end).
         *
         * Resides after _end because even though the .brk section is after
         * __end_of_kernel_reserve, the .brk section is later reserved as a
         * part of the kernel. Since this scratch section is located after
         * __end_of_kernel_reserve, it will be discarded and become part of
         * the available memory. As such, it can only be used by very early
         * boot code and must not be needed afterwards.
         *
         * Currently used by SME for performing in-place encryption of the
         * kernel during boot. Resides on a 2MB boundary to simplify the
         * pagetable setup used for SME in-place encryption.
         */
        . = ALIGN(HPAGE_SIZE);
        .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
                __init_scratch_begin = .;
                *(.init.scratch)
                . = ALIGN(HPAGE_SIZE);
                __init_scratch_end = .;
        }
#endif

        STABS_DEBUG
        DWARF_DEBUG
        ELF_DETAILS

        DISCARDS

        /*
         * Make sure that the .got.plt is either completely empty or it
         * contains only the lazy dispatch entries.
         */
        .got.plt (INFO) : { *(.got.plt) }
        ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
               SIZEOF(.got.plt) == 0x18,
#else
               SIZEOF(.got.plt) == 0xc,
#endif
               "Unexpected GOT/PLT entries detected!")
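
        /*
         * The 0x18 and 0xc above correspond to the three reserved .got.plt
         * slots (_GLOBAL_OFFSET_TABLE_[0..2], used for lazy dispatch):
         * 3 * 8 bytes on 64-bit, 3 * 4 bytes on 32-bit.
         */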

        /*
         * Sections that should stay zero sized, which is safer to
         * explicitly check instead of blindly discarding.
         */
        .got : {
                *(.got) *(.igot.*)
        }
        ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

        .plt : {
                *(.plt) *(.plt.*) *(.iplt)
        }
        ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

        .rel.dyn : {
                *(.rel.*) *(.rel_*)
        }
        ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

        .rela.dyn : {
                *(.rela.*) *(.rela_*)
        }
        ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
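
/*
 * For context: on 64-bit SMP the per-cpu area is zero-based, so symbols
 * such as gdt_page carry small offsets rather than linked addresses.
 * Adding __per_cpu_load yields the address of the boot CPU's initial copy,
 * which early boot code can use before the per-cpu areas are set up.
 */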

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
           "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif

#ifdef CONFIG_MITIGATION_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR
 * of the two function addresses:
 */
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
                (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
                "SRSO function pair won't alias");
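
/*
 * Worked example of the identity used above: for A = 0b0110 and B = 0b0011,
 * A | B = 0b0111 (7) and A & B = 0b0010 (2), so (A | B) - (A & B) = 5 =
 * 0b0101 = A ^ B. This holds in general because OR sets every bit position
 * where either operand is set, and subtracting AND removes the positions
 * where both are set, leaving exactly the positions where they differ.
 */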
#endif

#endif /* CONFIG_X86_64 */
