/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains kexec low-level functions.
 *
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011,  IBM Corporation
 *		Author: Suzuki Poulose <suzuki@in.ibm.com>
 */

#include <linux/objtool.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/kexec.h>

	.text

	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */

#ifdef CONFIG_PPC_85xx

	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include <kernel/85xx_entry_mapping.S>
#undef ENTRY_MAPPING_KEXEC_SETUP

	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#elif defined(CONFIG_44x)

	/* Save our parameters */
	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	setup_map_47x
#endif /* CONFIG_PPC_47x */

	/*
	 * Code for setting up 1:1 mapping for PPC440x for KEXEC
	 *
	 * We cannot switch off the MMU on PPC44x.
	 * So we:
	 * 1) Invalidate all the mappings except the one we are running from.
	 * 2) Create a tmp mapping for our code in the other address space(TS) and
	 *    jump to it. Invalidate the entry we started in.
	 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
	 * 4) Jump to the 1:1 mapping in original TS.
	 * 5) Invalidate the tmp mapping.
	 *
	 * - Based on the kexec support code for FSL BookE
	 *
	 */

	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3
	mfmsr	r4
	andi.	r4,r4,MSR_IS@l
	beq	wmmucr
	oris	r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
	mtspr	SPRN_MMUCR,r3
	sync

	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bcl	20,31,$+4			/* Find our address */
0:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skip				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skip:
	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync

	/* Create a temp mapping and jump to it */
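	/*
	 * The temp index r24 = (r23 & 1) + 1 is always 1 or 2 and its low
	 * bit differs from r23's, so it can never collide with the TLB
	 * entry we are currently executing from.
	 */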
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	bne	tlb_4k
	rotlwi	r10, r10, 28		/* r10 = 256M */
	b	write_out
tlb_4k:
	cmpwi	r11, PPC44x_TLB_4K
	bne	default
	rotlwi	r10, r10, 12		/* r10 = 4K */
	b	write_out
default:
	rotlwi	r10, r10, 10		/* r10 = 1K */

write_out:
	/*
	 * Write out the tmp 1:1 mapping for this code in other address space
	 * Fixup EPN = RPN , TS=other address space
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bcl	20,31,$+4
1:	mflr	r8
	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi

2:
	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	li	r5, 0
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1		/* Revert back to Original TS */

	li	r8, 0			/* PageNumber */
	li	r6, 3			/* TLB Index, start at 3 */

next_tlb:
	rotlwi	r3, r8, 28		/* Create EPN (bits 0-3) */
	mr	r4, r3			/* RPN = EPN */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M)	/* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23		/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1		/* Increment PN */
	addi	r6, r6, 1		/* Increment TLB Index */
	cmpwi	r8, 8			/* Are we done ? */
	bne	next_tlb
	isync

	/* Jump to the new mapping 1:1 */
	li	r9,0
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bcl	20,31,$+4
1:	mflr	r8
	and	r8, r8, r11		/* Get our offset within page */
	addi	r8, r8, (2f-1b)

	and	r5, r25, r10		/* Get our target PageNum */
	or	r8, r8, r5		/* Target jump address */

	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi
2:
	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	sync
	b	ppc44x_map_done

#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

setup_map_47x:

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3			/* Set PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4, r4, MSR_IS@l		/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3			/* Put MMUCR */
	sync
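
	/*
	 * tlbsx below locates the UTLB entry mapping this code; its three
	 * words (EPN/size/TS, RPN, attributes) are kept in r24-r26 so the
	 * same translation can be re-created in the other address space.
	 */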
	/* Find the entry we are running from */
	bcl	20,31,$+4
2:	mflr	r23
	tlbsx	r23, 0, r23
	tlbre	r24, r23, 0			/* TLB Word 0 */
	tlbre	r25, r23, 1			/* TLB Word 1 */
	tlbre	r26, r23, 2			/* TLB Word 2 */

	/*
	 * Invalidates all the tlb entries by writing to 256 RPNs(r4)
	 * of 4k page size in all 4 ways (0-3 in r3).
	 * This would invalidate the entire UTLB including the one we are
	 * running from. However the shadow TLB entries would help us
	 * to continue the execution, until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000		/* specify the way */
	addi	r4, 0, 0		/* TLB Word0 = (EPN=0, VALID = 0) */
	addi	r5, 0, 0
	b	clear_utlb_entry

	/* Align the loop to speed things up. from head_44x.S */
	.align	6

clear_utlb_entry:

	tlbwe	r4, r3, 0
	tlbwe	r5, r3, 1
	tlbwe	r5, r3, 2
	addis	r3, r3, 0x2000		/* Increment the way */
	cmpwi	r3, 0
	bne	clear_utlb_entry
	addis	r3, 0, 0x8000
	addis	r4, r4, 0x100		/* Increment the EPN */
	cmpwi	r4, 0
	bne	clear_utlb_entry

	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31	/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1		/* r7 = !TS */

	insrwi	r24, r7, 1, 21		/* Change the TS in the saved TLB word 0 */

	/*
	 * write out the TLB entries for the tmp mapping
	 * Use way '0' so that we could easily invalidate it later.
	 */
	lis	r3, 0x8000		/* Way '0' */

	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Update the msr to the new TS */
	insrwi	r5, r7, 1, 26

	bcl	20,31,$+4
1:	mflr	r6
	addi	r6, r6, (2f-1b)

	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r5
	rfi

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
2:
	li	r3, 0
	li	r4, 0			/* TLB Word 0 */
	li	r5, 0			/* TLB Word 1 */
	li	r6, 0
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */

	li	r8, 0			/* PageIndex */

	xori	r7, r7, 1		/* revert back to original TS */

write_utlb:
	rotlwi	r5, r8, 28		/* RPN = PageIndex * 256M */
					/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5			/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21		/* Insert the TS to Word 0 */

	tlbwe	r4, r3, 0		/* Write out the entries */
	tlbwe	r5, r3, 1
	tlbwe	r6, r3, 2
	addi	r8, r8, 1
	cmpwi	r8, 8			/* Have we completed ? */
	bne	write_utlb

	/* make sure we complete the TLB write up */
	isync

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    to jump to.
	 */
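	/*
	 * The physical target is the RPN kept in r25 (TLB word 1 of the tmp
	 * mapping) combined with the offset of label 2: within that page.
	 */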
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	bne	0f
	li	r10, 0x1000			/* r10 = 4k */
	ANNOTATE_INTRA_FUNCTION_CALL
	bl	1f

0:
	/* Defaults to 256M */
	lis	r10, 0x1000

	bcl	20,31,$+4
1:	mflr	r4
	addi	r4, r4, (2f-1b)			/* virtual address of 2f */

	subi	r11, r10, 1			/* offsetmask = Pagesize - 1 */
	not	r10, r11			/* Pagemask = ~(offsetmask) */

	and	r5, r25, r10			/* Physical page */
	and	r6, r4, r11			/* offset within the current page */

	or	r5, r5, r6			/* Physical address for 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26

	mtspr	SPRN_SRR1, r8
	mtspr	SPRN_SRR0, r5
	rfi

2:
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000			/* Way '0' */

	clrrwi	r24, r24, 12			/* Clear the valid bit */
	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Make sure we complete the TLB write and flush the shadow TLB */
	isync

#endif

ppc44x_map_done:


	/* Restore the parameters */
	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#else
	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
#endif
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4
9:
	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	mfspr	r3, SPRN_PIR /* current core we are running on */
	mr	r4, r5 /* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel