// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 *
 * Generates relocation information used by the kernel to convert
 * absolute addresses in hyp data from kernel VAs to hyp VAs.
 *
 * This is necessary because hyp code is linked into the same binary
 * as the kernel but executes under different memory mappings.
 * If the compiler used absolute addressing, those addresses need to
 * be converted before they are used by hyp code.
 *
 * The input of this program is the relocatable ELF object containing
 * all hyp code/data, not yet linked into vmlinux. Hyp section names
 * should have been prefixed with `.hyp` at this point.
 *
 * The output (printed to stdout) is an assembly file containing
 * an array of 32-bit integers and static relocations that instruct
 * the linker of `vmlinux` to populate the array entries with offsets
 * to positions in the kernel binary containing VAs used by hyp code.
 *
 * Note that dynamic relocations could be used for the same purpose.
 * However, those are only generated if CONFIG_RELOCATABLE=y.
 */
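
/*
 * For illustration only (derived from the emit_* helpers below; the section
 * name and offset are hypothetical): given a single R_AARCH64_ABS64
 * relocation at offset 0x10 of `.hyp.rodata`, the emitted assembly would
 * look roughly like:
 *
 *	.data
 *	.pushsection .hyp.reloc, "a"
 *	.global __hyp_section_.hyp.rodata
 *	.word 0
 *	.reloc 0, R_AARCH64_PREL32, __hyp_section_.hyp.rodata + 0x10
 *	.popsection
 */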

#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include <generated/autoconf.h>

#define HYP_SECTION_PREFIX		".hyp"
#define HYP_RELOC_SECTION		".hyp.reloc"
#define HYP_SECTION_SYMBOL_PREFIX	"__hyp_section_"

/*
 * AArch64 relocation type constants.
 * Included in case these are not defined in the host toolchain.
 */
#ifndef R_AARCH64_ABS64
#define R_AARCH64_ABS64			257
#endif
#ifndef R_AARCH64_ABS32
#define R_AARCH64_ABS32			258
#endif
#ifndef R_AARCH64_PREL64
#define R_AARCH64_PREL64		260
#endif
#ifndef R_AARCH64_PREL32
#define R_AARCH64_PREL32		261
#endif
#ifndef R_AARCH64_PREL16
#define R_AARCH64_PREL16		262
#endif
#ifndef R_AARCH64_PLT32
#define R_AARCH64_PLT32			314
#endif
#ifndef R_AARCH64_LD_PREL_LO19
#define R_AARCH64_LD_PREL_LO19		273
#endif
#ifndef R_AARCH64_ADR_PREL_LO21
#define R_AARCH64_ADR_PREL_LO21		274
#endif
#ifndef R_AARCH64_ADR_PREL_PG_HI21
#define R_AARCH64_ADR_PREL_PG_HI21	275
#endif
#ifndef R_AARCH64_ADR_PREL_PG_HI21_NC
#define R_AARCH64_ADR_PREL_PG_HI21_NC	276
#endif
#ifndef R_AARCH64_ADD_ABS_LO12_NC
#define R_AARCH64_ADD_ABS_LO12_NC	277
#endif
#ifndef R_AARCH64_LDST8_ABS_LO12_NC
#define R_AARCH64_LDST8_ABS_LO12_NC	278
#endif
#ifndef R_AARCH64_TSTBR14
#define R_AARCH64_TSTBR14		279
#endif
#ifndef R_AARCH64_CONDBR19
#define R_AARCH64_CONDBR19		280
#endif
#ifndef R_AARCH64_JUMP26
#define R_AARCH64_JUMP26		282
#endif
#ifndef R_AARCH64_CALL26
#define R_AARCH64_CALL26		283
#endif
#ifndef R_AARCH64_LDST16_ABS_LO12_NC
#define R_AARCH64_LDST16_ABS_LO12_NC	284
#endif
#ifndef R_AARCH64_LDST32_ABS_LO12_NC
#define R_AARCH64_LDST32_ABS_LO12_NC	285
#endif
#ifndef R_AARCH64_LDST64_ABS_LO12_NC
#define R_AARCH64_LDST64_ABS_LO12_NC	286
#endif
#ifndef R_AARCH64_MOVW_PREL_G0
#define R_AARCH64_MOVW_PREL_G0		287
#endif
#ifndef R_AARCH64_MOVW_PREL_G0_NC
#define R_AARCH64_MOVW_PREL_G0_NC	288
#endif
#ifndef R_AARCH64_MOVW_PREL_G1
#define R_AARCH64_MOVW_PREL_G1		289
#endif
#ifndef R_AARCH64_MOVW_PREL_G1_NC
#define R_AARCH64_MOVW_PREL_G1_NC	290
#endif
#ifndef R_AARCH64_MOVW_PREL_G2
#define R_AARCH64_MOVW_PREL_G2		291
#endif
#ifndef R_AARCH64_MOVW_PREL_G2_NC
#define R_AARCH64_MOVW_PREL_G2_NC	292
#endif
#ifndef R_AARCH64_MOVW_PREL_G3
#define R_AARCH64_MOVW_PREL_G3		293
#endif
#ifndef R_AARCH64_LDST128_ABS_LO12_NC
#define R_AARCH64_LDST128_ABS_LO12_NC	299
#endif

/* Global state of the processed ELF. */
static struct {
	const char	*path;
	char		*begin;
	size_t		size;
	Elf64_Ehdr	*ehdr;
	Elf64_Shdr	*sh_table;
	const char	*sh_string;
} elf;

#if defined(CONFIG_CPU_LITTLE_ENDIAN)

#define elf16toh(x)	le16toh(x)
#define elf32toh(x)	le32toh(x)
#define elf64toh(x)	le64toh(x)

#define ELFENDIAN	ELFDATA2LSB

#elif defined(CONFIG_CPU_BIG_ENDIAN)

#define elf16toh(x)	be16toh(x)
#define elf32toh(x)	be32toh(x)
#define elf64toh(x)	be64toh(x)

#define ELFENDIAN	ELFDATA2MSB

#else

#error PDP-endian sadly unsupported...

#endif

#define fatal_error(fmt, ...)					\
({								\
	fprintf(stderr, "error: %s: " fmt "\n",			\
		elf.path, ## __VA_ARGS__);			\
	exit(EXIT_FAILURE);					\
	__builtin_unreachable();				\
})

#define fatal_perror(msg)					\
({								\
	fprintf(stderr, "error: %s: " msg ": %s\n",		\
		elf.path, strerror(errno));			\
	exit(EXIT_FAILURE);					\
	__builtin_unreachable();				\
})

#define assert_op(lhs, rhs, fmt, op)				\
({								\
	typeof(lhs) _lhs = (lhs);				\
	typeof(rhs) _rhs = (rhs);				\
								\
	if (!(_lhs op _rhs)) {					\
		fatal_error("assertion " #lhs " " #op " " #rhs	\
			" failed (lhs=" fmt ", rhs=" fmt	\
			", line=%d)", _lhs, _rhs, __LINE__);	\
	}							\
})

#define assert_eq(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, ==)
#define assert_ne(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, !=)
#define assert_lt(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, <)
#define assert_ge(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, >=)

/*
 * Return a pointer of a given type at a given offset from
 * the beginning of the ELF file.
 */
#define elf_ptr(type, off) ((type *)(elf.begin + (off)))

/* Iterate over all sections in the ELF. */
#define for_each_section(var) \
	for (var = elf.sh_table; var < elf.sh_table + elf16toh(elf.ehdr->e_shnum); ++var)

/* Iterate over all Elf64_Rela relocations in a given section. */
#define for_each_rela(shdr, var) \
	for (var = elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset)); \
	     var < elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset) + elf64toh(shdr->sh_size)); var++)

/* True if a string starts with a given prefix. */
static inline bool starts_with(const char *str, const char *prefix)
{
	return memcmp(str, prefix, strlen(prefix)) == 0;
}

/* Returns a string containing the name of a given section. */
static inline const char *section_name(Elf64_Shdr *shdr)
{
	return elf.sh_string + elf32toh(shdr->sh_name);
}

/* Returns a pointer to the first byte of section data. */
static inline const char *section_begin(Elf64_Shdr *shdr)
{
	return elf_ptr(char, elf64toh(shdr->sh_offset));
}

/* Find a section by its offset from the beginning of the file. */
static inline Elf64_Shdr *section_by_off(Elf64_Off off)
{
	assert_ne(off, 0UL, "%lu");
	return elf_ptr(Elf64_Shdr, off);
}

/* Find a section by its index. */
static inline Elf64_Shdr *section_by_idx(uint16_t idx)
{
	assert_ne(idx, SHN_UNDEF, "%u");
	return &elf.sh_table[idx];
}

/*
 * Memory-map the given ELF file, perform sanity checks, and
 * populate global state.
 */
static void init_elf(const char *path)
{
	int fd, ret;
	struct stat stat;

	/* Store path in the global struct for error printing. */
	elf.path = path;

	/* Open the ELF file. */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		fatal_perror("Could not open ELF file");

	/* Get status of ELF file to obtain its size. */
	ret = fstat(fd, &stat);
	if (ret < 0) {
		close(fd);
		fatal_perror("Could not get status of ELF file");
	}

	/* mmap() the entire ELF file read-only at an arbitrary address. */
	elf.begin = mmap(0, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (elf.begin == MAP_FAILED) {
		close(fd);
		fatal_perror("Could not mmap ELF file");
	}

	/* mmap() was successful, close the FD. */
	close(fd);

	/* Get pointer to the ELF header. */
	assert_ge(stat.st_size, sizeof(*elf.ehdr), "%lu");
	elf.ehdr = elf_ptr(Elf64_Ehdr, 0);

	/* Check the ELF magic. */
	assert_eq(elf.ehdr->e_ident[EI_MAG0], ELFMAG0, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG1], ELFMAG1, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG2], ELFMAG2, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG3], ELFMAG3, "0x%x");

	/* Sanity check that this is an ELF64 relocatable object for AArch64. */
	assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u");
	assert_eq(elf.ehdr->e_ident[EI_DATA], ELFENDIAN, "%u");
	assert_eq(elf16toh(elf.ehdr->e_type), ET_REL, "%u");
	assert_eq(elf16toh(elf.ehdr->e_machine), EM_AARCH64, "%u");

	/* Populate fields of the global struct. */
	elf.sh_table = section_by_off(elf64toh(elf.ehdr->e_shoff));
	elf.sh_string = section_begin(section_by_idx(elf16toh(elf.ehdr->e_shstrndx)));
}

/* Print the prologue of the output ASM file. */
static void emit_prologue(void)
{
	printf(".data\n"
	       ".pushsection " HYP_RELOC_SECTION ", \"a\"\n");
}

/* Print ASM statements needed as a prologue to a processed hyp section. */
static void emit_section_prologue(const char *sh_orig_name)
{
	/* Declare the hyp section symbol. */
	printf(".global %s%s\n", HYP_SECTION_SYMBOL_PREFIX, sh_orig_name);
}

/*
 * Print ASM statements to create a hyp relocation entry for a given
 * R_AARCH64_ABS64 relocation.
 *
 * The linker of vmlinux will populate the position given by `rela` with
 * an absolute 64-bit kernel VA. If the kernel is relocatable, it will
 * also generate a dynamic relocation entry so that the kernel can shift
 * the address at runtime for KASLR.
 *
 * Emit a 32-bit offset from the current address to the position given
 * by `rela`. This way the kernel can iterate over all kernel VAs used
 * by hyp at runtime and convert them to hyp VAs. However, that offset
 * will not be known until linking of `vmlinux`, so emit a PREL32
 * relocation referencing a symbol that the hyp linker script put at
 * the beginning of the relocated section + the offset from `rela`.
 */
static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name)
{
	/* Offset of this reloc from the beginning of HYP_RELOC_SECTION. */
	static size_t reloc_offset;

	/* Create storage for the 32-bit offset. */
	printf(".word 0\n");

	/*
	 * Create a PREL32 relocation which instructs the linker of `vmlinux`
	 * to insert offset to position <base> + <offset>, where <base> is
	 * a symbol at the beginning of the relocated section, and <offset>
	 * is `rela->r_offset`.
	 */
	printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n",
		reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name,
		elf64toh(rela->r_offset));

	reloc_offset += 4;
}

/* Print the epilogue of the output ASM file. */
static void emit_epilogue(void)
{
	printf(".popsection\n");
}
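
/*
 * Not part of this tool: a rough sketch of how the kernel side can consume
 * the resulting array at boot. After linking, each 32-bit entry holds the
 * offset from the entry itself to a location in the kernel image that stores
 * a kernel VA used by hyp, so that location can be patched in place. Symbol
 * and helper names below are illustrative assumptions:
 *
 *	extern int32_t __hyp_reloc_begin[], __hyp_reloc_end[];
 *
 *	for (int32_t *rel = __hyp_reloc_begin; rel < __hyp_reloc_end; rel++) {
 *		// position the PREL32 relocation pointed at
 *		uint64_t *ptr = (uint64_t *)((char *)rel + *rel);
 *
 *		*ptr = kern_hyp_va(*ptr);	// kernel VA -> hyp VA
 *	}
 */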

/*
 * Iterate over all RELA relocations in a given section and emit
 * hyp relocation data for all absolute addresses in hyp code/data.
 *
 * Static relocations that generate PC-relative-addressing are ignored.
 * Failure is reported for unexpected relocation types.
 */
static void emit_rela_section(Elf64_Shdr *sh_rela)
{
	Elf64_Shdr *sh_orig = &elf.sh_table[elf32toh(sh_rela->sh_info)];
	const char *sh_orig_name = section_name(sh_orig);
	Elf64_Rela *rela;

	/* Skip all non-hyp sections. */
	if (!starts_with(sh_orig_name, HYP_SECTION_PREFIX))
		return;

	emit_section_prologue(sh_orig_name);

	for_each_rela(sh_rela, rela) {
		uint32_t type = (uint32_t)elf64toh(rela->r_info);

		/* Check that rela points inside the relocated section. */
		assert_lt(elf64toh(rela->r_offset), elf64toh(sh_orig->sh_size), "0x%lx");

		switch (type) {
		/*
		 * Data relocations to generate absolute addressing.
		 * Emit a hyp relocation.
		 */
		case R_AARCH64_ABS64:
			emit_rela_abs64(rela, sh_orig_name);
			break;
		/* Allow 32-bit absolute relocation, for kCFI type hashes. */
		case R_AARCH64_ABS32:
			break;
		/* Allow position-relative data relocations. */
		case R_AARCH64_PREL64:
		case R_AARCH64_PREL32:
		case R_AARCH64_PREL16:
		case R_AARCH64_PLT32:
			break;
		/* Allow relocations to generate PC-relative addressing. */
		case R_AARCH64_LD_PREL_LO19:
		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			break;
		/* Allow relative relocations for control-flow instructions. */
		case R_AARCH64_TSTBR14:
		case R_AARCH64_CONDBR19:
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			break;
		/* Allow group relocations to create PC-relative offset inline. */
		case R_AARCH64_MOVW_PREL_G0:
		case R_AARCH64_MOVW_PREL_G0_NC:
		case R_AARCH64_MOVW_PREL_G1:
		case R_AARCH64_MOVW_PREL_G1_NC:
		case R_AARCH64_MOVW_PREL_G2:
		case R_AARCH64_MOVW_PREL_G2_NC:
		case R_AARCH64_MOVW_PREL_G3:
			break;
		default:
			fatal_error("Unexpected RELA type %u", type);
		}
	}
}

/* Iterate over all sections and emit hyp relocation data for RELA sections. */
static void emit_all_relocs(void)
{
	Elf64_Shdr *shdr;

	for_each_section(shdr) {
		switch (elf32toh(shdr->sh_type)) {
		case SHT_REL:
			fatal_error("Unexpected SHT_REL section \"%s\"",
				section_name(shdr));
		case SHT_RELA:
			emit_rela_section(shdr);
			break;
		}
	}
}

int main(int argc, const char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "Usage: %s <elf_input>\n", argv[0]);
		return EXIT_FAILURE;
	}

	init_elf(argv[1]);

	emit_prologue();
	emit_all_relocs();
	emit_epilogue();

	return EXIT_SUCCESS;
}
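
/*
 * Typical use (an assumption about the surrounding kernel build, not encoded
 * in this file): the tool is run on the combined, not-yet-linked hyp object
 * and its stdout is assembled and linked into vmlinux, e.g.
 *
 *	gen-hyprel kvm_nvhe.tmp.o > hyp-reloc.S
 */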