1 /* SPDX-License-Identifier: GPL-2.0-or-later * << 2 /* 1 /* 3 * This file contains miscellaneous low-level 2 * This file contains miscellaneous low-level functions. 4 * Copyright (C) 1995-1996 Gary Thomas (gdt 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * 4 * 6 * Largely rewritten by Cort Dougan (cort@cs.n 5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) 7 * and Paul Mackerras. 6 * and Paul Mackerras. 8 * 7 * 9 * PPC64 updates by Dave Engebretsen (engebret !! 8 * This program is free software; you can redistribute it and/or >> 9 * modify it under the terms of the GNU General Public License >> 10 * as published by the Free Software Foundation; either version >> 11 * 2 of the License, or (at your option) any later version. 10 * 12 * 11 * setjmp/longjmp code by Paul Mackerras. << 12 */ 13 */ 13 #include <linux/export.h> !! 14 14 #include <asm/ppc_asm.h> !! 15 #include <linux/config.h> >> 16 #include <linux/sys.h> 15 #include <asm/unistd.h> 17 #include <asm/unistd.h> 16 #include <asm/asm-compat.h> !! 18 #include <asm/errno.h> 17 #include <asm/asm-offsets.h> !! 19 #include <asm/processor.h> >> 20 #include <asm/page.h> >> 21 #include <asm/cache.h> >> 22 #include <asm/cputable.h> >> 23 #include <asm/mmu.h> >> 24 #include <asm/ppc_asm.h> >> 25 #include <asm/thread_info.h> >> 26 #include <asm/offsets.h> 18 27 19 .text 28 .text 20 29 >> 30 .align 5 >> 31 _GLOBAL(__delay) >> 32 cmpwi 0,r3,0 >> 33 mtctr r3 >> 34 beqlr >> 35 1: bdnz 1b >> 36 blr >> 37 21 /* 38 /* 22 * Returns (address we are running at) - (addr !! 39 * Returns (address we're running at) - (address we were linked at) 23 * for use before the text and data are mapped 40 * for use before the text and data are mapped to KERNELBASE. 
>> 41 */ >> 42 _GLOBAL(reloc_offset) >> 43 mflr r0 >> 44 bl 1f >> 45 1: mflr r3 >> 46 lis r4,1b@ha >> 47 addi r4,r4,1b@l >> 48 subf r3,r4,r3 >> 49 mtlr r0 >> 50 blr 24 51 >> 52 /* 25 * add_reloc_offset(x) returns x + reloc_offse 53 * add_reloc_offset(x) returns x + reloc_offset(). 26 */ 54 */ 27 << 28 _GLOBAL(reloc_offset) << 29 li r3, 0 << 30 _GLOBAL(add_reloc_offset) 55 _GLOBAL(add_reloc_offset) 31 mflr r0 56 mflr r0 32 bcl 20,31,$+4 !! 57 bl 1f 33 1: mflr r5 58 1: mflr r5 34 PPC_LL r4,(2f-1b)(r5) !! 59 lis r4,1b@ha >> 60 addi r4,r4,1b@l 35 subf r5,r4,r5 61 subf r5,r4,r5 36 add r3,r3,r5 62 add r3,r3,r5 37 mtlr r0 63 mtlr r0 38 blr 64 blr 39 _ASM_NOKPROBE_SYMBOL(reloc_offset) << 40 _ASM_NOKPROBE_SYMBOL(add_reloc_offset) << 41 65 42 .align 3 !! 66 /* 43 2: PPC_LONG 1b !! 67 * sub_reloc_offset(x) returns x - reloc_offset(). 44 !! 68 */ 45 _GLOBAL(setjmp) !! 69 _GLOBAL(sub_reloc_offset) 46 mflr r0 70 mflr r0 47 PPC_STL r0,0(r3) !! 71 bl 1f 48 PPC_STL r1,SZL(r3) !! 72 1: mflr r5 49 PPC_STL r2,2*SZL(r3) !! 73 lis r4,1b@ha 50 #ifdef CONFIG_PPC32 !! 74 addi r4,r4,1b@l 51 mfcr r12 !! 75 subf r5,r4,r5 52 stmw r12, 3*SZL(r3) !! 76 subf r3,r5,r3 >> 77 mtlr r0 >> 78 blr >> 79 >> 80 /* >> 81 * reloc_got2 runs through the .got2 section adding an offset >> 82 * to each entry. >> 83 */ >> 84 _GLOBAL(reloc_got2) >> 85 mflr r11 >> 86 lis r7,__got2_start@ha >> 87 addi r7,r7,__got2_start@l >> 88 lis r8,__got2_end@ha >> 89 addi r8,r8,__got2_end@l >> 90 subf r8,r7,r8 >> 91 srwi. 
r8,r8,2 >> 92 beqlr >> 93 mtctr r8 >> 94 bl 1f >> 95 1: mflr r0 >> 96 lis r4,1b@ha >> 97 addi r4,r4,1b@l >> 98 subf r0,r4,r0 >> 99 add r7,r0,r7 >> 100 2: lwz r0,0(r7) >> 101 add r0,r0,r3 >> 102 stw r0,0(r7) >> 103 addi r7,r7,4 >> 104 bdnz 2b >> 105 mtlr r11 >> 106 blr >> 107 >> 108 /* >> 109 * identify_cpu, >> 110 * called with r3 = data offset and r4 = CPU number >> 111 * doesn't change r3 >> 112 */ >> 113 _GLOBAL(identify_cpu) >> 114 addis r8,r3,cpu_specs@ha >> 115 addi r8,r8,cpu_specs@l >> 116 mfpvr r7 >> 117 1: >> 118 lwz r5,CPU_SPEC_PVR_MASK(r8) >> 119 and r5,r5,r7 >> 120 lwz r6,CPU_SPEC_PVR_VALUE(r8) >> 121 cmplw 0,r6,r5 >> 122 beq 1f >> 123 addi r8,r8,CPU_SPEC_ENTRY_SIZE >> 124 b 1b >> 125 1: >> 126 addis r6,r3,cur_cpu_spec@ha >> 127 addi r6,r6,cur_cpu_spec@l >> 128 slwi r4,r4,2 >> 129 sub r8,r8,r3 >> 130 stwx r8,r4,r6 >> 131 blr >> 132 >> 133 /* >> 134 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups >> 135 * and writes nop's over sections of code that don't apply for this cpu. >> 136 * r3 = data offset (not changed) >> 137 */ >> 138 _GLOBAL(do_cpu_ftr_fixups) >> 139 /* Get CPU 0 features */ >> 140 addis r6,r3,cur_cpu_spec@ha >> 141 addi r6,r6,cur_cpu_spec@l >> 142 lwz r4,0(r6) >> 143 add r4,r4,r3 >> 144 lwz r4,CPU_SPEC_FEATURES(r4) >> 145 >> 146 /* Get the fixup table */ >> 147 addis r6,r3,__start___ftr_fixup@ha >> 148 addi r6,r6,__start___ftr_fixup@l >> 149 addis r7,r3,__stop___ftr_fixup@ha >> 150 addi r7,r7,__stop___ftr_fixup@l >> 151 >> 152 /* Do the fixup */ >> 153 1: cmplw 0,r6,r7 >> 154 bgelr >> 155 addi r6,r6,16 >> 156 lwz r8,-16(r6) /* mask */ >> 157 and r8,r8,r4 >> 158 lwz r9,-12(r6) /* value */ >> 159 cmplw 0,r8,r9 >> 160 beq 1b >> 161 lwz r8,-8(r6) /* section begin */ >> 162 lwz r9,-4(r6) /* section end */ >> 163 subf. 
r9,r8,r9 >> 164 beq 1b >> 165 /* write nops over the section of code */ >> 166 /* todo: if large section, add a branch at the start of it */ >> 167 srwi r9,r9,2 >> 168 mtctr r9 >> 169 add r8,r8,r3 >> 170 lis r0,0x60000000@h /* nop */ >> 171 3: stw r0,0(r8) >> 172 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l >> 173 beq 2f >> 174 dcbst 0,r8 /* suboptimal, but simpler */ >> 175 sync >> 176 icbi 0,r8 >> 177 2: addi r8,r8,4 >> 178 bdnz 3b >> 179 sync /* additional sync needed on g4 */ >> 180 isync >> 181 b 1b >> 182 >> 183 /* >> 184 * call_setup_cpu - call the setup_cpu function for this cpu >> 185 * r3 = data offset, r24 = cpu number >> 186 * >> 187 * Setup function is called with: >> 188 * r3 = data offset >> 189 * r4 = CPU number >> 190 * r5 = ptr to CPU spec (relocated) >> 191 */ >> 192 _GLOBAL(call_setup_cpu) >> 193 addis r5,r3,cur_cpu_spec@ha >> 194 addi r5,r5,cur_cpu_spec@l >> 195 slwi r4,r24,2 >> 196 lwzx r5,r4,r5 >> 197 add r5,r5,r3 >> 198 lwz r6,CPU_SPEC_SETUP(r5) >> 199 add r6,r6,r3 >> 200 mtctr r6 >> 201 mr r4,r24 >> 202 bctr >> 203 >> 204 #ifdef CONFIG_CPU_FREQ_PMAC >> 205 >> 206 /* This gets called by via-pmu.c to switch the PLL selection >> 207 * on 750fx CPU. This function should really be moved to some >> 208 * other place (as most of the cpufreq code in via-pmu >> 209 */ >> 210 _GLOBAL(low_choose_750fx_pll) >> 211 /* Clear MSR:EE */ >> 212 mfmsr r7 >> 213 rlwinm r0,r7,0,17,15 >> 214 mtmsr r0 >> 215 >> 216 /* If switching to PLL1, disable HID0:BTIC */ >> 217 cmpli cr0,r3,0 >> 218 beq 1f >> 219 mfspr r5,HID0 >> 220 rlwinm r5,r5,0,27,25 >> 221 sync >> 222 mtspr HID0,r5 >> 223 isync >> 224 sync >> 225 >> 226 1: >> 227 /* Calc new HID1 value */ >> 228 mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */ >> 229 rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */ >> 230 rlwinm r4,r4,0,16,14 /* Could have I used rlwimi here ? 
*/ >> 231 or r4,r4,r5 >> 232 mtspr SPRN_HID1,r4 >> 233 >> 234 /* Store new HID1 image */ >> 235 rlwinm r6,r1,0,0,18 >> 236 lwz r6,TI_CPU(r6) >> 237 slwi r6,r6,2 >> 238 addis r6,r6,nap_save_hid1@ha >> 239 stw r4,nap_save_hid1@l(r6) >> 240 >> 241 /* If switching to PLL0, enable HID0:BTIC */ >> 242 cmpli cr0,r3,0 >> 243 bne 1f >> 244 mfspr r5,HID0 >> 245 ori r5,r5,HID0_BTIC >> 246 sync >> 247 mtspr HID0,r5 >> 248 isync >> 249 sync >> 250 >> 251 1: >> 252 /* Return */ >> 253 mtmsr r7 >> 254 blr >> 255 >> 256 #endif /* CONFIG_CPU_FREQ_PMAC */ >> 257 >> 258 /* void local_save_flags_ptr(unsigned long *flags) */ >> 259 _GLOBAL(local_save_flags_ptr) >> 260 mfmsr r4 >> 261 stw r4,0(r3) >> 262 blr >> 263 /* >> 264 * Need these nops here for taking over save/restore to >> 265 * handle lost intrs >> 266 * -- Cort >> 267 */ >> 268 nop >> 269 nop >> 270 nop >> 271 nop >> 272 nop >> 273 nop >> 274 nop >> 275 nop >> 276 nop >> 277 nop >> 278 nop >> 279 nop >> 280 nop >> 281 nop >> 282 nop >> 283 nop >> 284 nop >> 285 _GLOBAL(local_save_flags_ptr_end) >> 286 >> 287 /* void local_irq_restore(unsigned long flags) */ >> 288 _GLOBAL(local_irq_restore) >> 289 /* >> 290 * Just set/clear the MSR_EE bit through restore/flags but do not >> 291 * change anything else. This is needed by the RT system and makes >> 292 * sense anyway. >> 293 * -- Cort >> 294 */ >> 295 mfmsr r4 >> 296 /* Copy all except the MSR_EE bit from r4 (current MSR value) >> 297 to r3. This is the sort of thing the rlwimi instruction is >> 298 designed for. -- paulus. */ >> 299 rlwimi r3,r4,0,17,15 >> 300 /* Check if things are setup the way we want _already_. 
*/ >> 301 cmpw 0,r3,r4 >> 302 beqlr >> 303 1: SYNC >> 304 mtmsr r3 >> 305 SYNC >> 306 blr >> 307 nop >> 308 nop >> 309 nop >> 310 nop >> 311 nop >> 312 nop >> 313 nop >> 314 nop >> 315 nop >> 316 nop >> 317 nop >> 318 nop >> 319 nop >> 320 nop >> 321 nop >> 322 nop >> 323 nop >> 324 nop >> 325 nop >> 326 _GLOBAL(local_irq_restore_end) >> 327 >> 328 _GLOBAL(local_irq_disable) >> 329 mfmsr r0 /* Get current interrupt state */ >> 330 rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */ >> 331 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ >> 332 SYNC /* Some chip revs have problems here... */ >> 333 mtmsr r0 /* Update machine state */ >> 334 blr /* Done */ >> 335 /* >> 336 * Need these nops here for taking over save/restore to >> 337 * handle lost intrs >> 338 * -- Cort >> 339 */ >> 340 nop >> 341 nop >> 342 nop >> 343 nop >> 344 nop >> 345 nop >> 346 nop >> 347 nop >> 348 nop >> 349 nop >> 350 nop >> 351 nop >> 352 nop >> 353 nop >> 354 nop >> 355 _GLOBAL(local_irq_disable_end) >> 356 >> 357 _GLOBAL(local_irq_enable) >> 358 mfmsr r3 /* Get current state */ >> 359 ori r3,r3,MSR_EE /* Turn on 'EE' bit */ >> 360 SYNC /* Some chip revs have problems here... */ >> 361 mtmsr r3 /* Update machine state */ >> 362 blr >> 363 /* >> 364 * Need these nops here for taking over save/restore to >> 365 * handle lost intrs >> 366 * -- Cort >> 367 */ >> 368 nop >> 369 nop >> 370 nop >> 371 nop >> 372 nop >> 373 nop >> 374 nop >> 375 nop >> 376 nop >> 377 nop >> 378 nop >> 379 nop >> 380 nop >> 381 nop >> 382 nop >> 383 nop >> 384 _GLOBAL(local_irq_enable_end) >> 385 >> 386 /* >> 387 * complement mask on the msr then "or" some values on. >> 388 * _nmask_and_or_msr(nmask, value_to_or) >> 389 */ >> 390 _GLOBAL(_nmask_and_or_msr) >> 391 mfmsr r0 /* Get current msr */ >> 392 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */ >> 393 or r0,r0,r4 /* Or on the bits in r4 (second parm) */ >> 394 SYNC /* Some chip revs have problems here... 
*/ >> 395 mtmsr r0 /* Update machine state */ >> 396 isync >> 397 blr /* Done */ >> 398 >> 399 >> 400 /* >> 401 * Flush MMU TLB >> 402 */ >> 403 _GLOBAL(_tlbia) >> 404 #if defined(CONFIG_40x) >> 405 sync /* Flush to memory before changing mapping */ >> 406 tlbia >> 407 isync /* Flush shadow TLB */ >> 408 #elif defined(CONFIG_44x) >> 409 lis r3,0 >> 410 sync >> 411 1: >> 412 tlbwe r3,r3,PPC44x_TLB_PAGEID >> 413 addi r3,r3,1 >> 414 /* Load high watermark */ >> 415 lis r4,tlb_44x_hwater@h >> 416 ori r4,r4,tlb_44x_hwater@l >> 417 lwz r5,0(r4) >> 418 cmpw 0,r3,r5 >> 419 ble 1b >> 420 isync >> 421 #else /* !(CONFIG_40x || CONFIG_44x) */ >> 422 #if defined(CONFIG_SMP) >> 423 rlwinm r8,r1,0,0,18 >> 424 lwz r8,TI_CPU(r8) >> 425 oris r8,r8,10 >> 426 mfmsr r10 >> 427 SYNC >> 428 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ >> 429 rlwinm r0,r0,0,28,26 /* clear DR */ >> 430 mtmsr r0 >> 431 SYNC_601 >> 432 isync >> 433 lis r9,mmu_hash_lock@h >> 434 ori r9,r9,mmu_hash_lock@l >> 435 tophys(r9,r9) >> 436 10: lwarx r7,0,r9 >> 437 cmpi 0,r7,0 >> 438 bne- 10b >> 439 stwcx. r8,0,r9 >> 440 bne- 10b >> 441 sync >> 442 tlbia >> 443 sync >> 444 TLBSYNC >> 445 li r0,0 >> 446 stw r0,0(r9) /* clear mmu_hash_lock */ >> 447 mtmsr r10 >> 448 SYNC_601 >> 449 isync >> 450 #else /* CONFIG_SMP */ >> 451 sync >> 452 tlbia >> 453 sync >> 454 #endif /* CONFIG_SMP */ >> 455 #endif /* ! defined(CONFIG_40x) */ >> 456 blr >> 457 >> 458 /* >> 459 * Flush MMU TLB for a particular address >> 460 */ >> 461 _GLOBAL(_tlbie) >> 462 #if defined(CONFIG_40x) >> 463 tlbsx. r3, 0, r3 >> 464 bne 10f >> 465 sync >> 466 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear. >> 467 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate >> 468 * the TLB entry. 
*/ >> 469 tlbwe r3, r3, TLB_TAG >> 470 isync >> 471 10: >> 472 #elif defined(CONFIG_44x) >> 473 mfspr r4,SPRN_MMUCR /* Get MMUCR */ >> 474 lis r5,PPC44x_MMUCR_STS@h >> 475 ori r5,r5,PPC44x_MMUCR_TID@l /* Create mask */ >> 476 andc r4,r4,r5 /* Clear out TID/STS bits */ >> 477 mfspr r5,SPRN_PID /* Get PID */ >> 478 or r4,r4,r5 /* Set TID bits */ >> 479 mfmsr r6 /* Get MSR */ >> 480 andi. r6,r6,MSR_IS@l /* TS=1? */ >> 481 beq 11f /* If not, leave STS=0 */ >> 482 oris r4,r4,PPC44x_MMUCR_STS@h /* Set STS=1 */ >> 483 11: mtspr SPRN_MMUCR, r4 /* Put MMUCR */ >> 484 >> 485 tlbsx. r3, 0, r3 >> 486 bne 10f >> 487 sync >> 488 /* There are only 64 TLB entries, so r3 < 64, >> 489 * which means bit 22, is clear. Since 22 is >> 490 * the V bit in the TLB_PAGEID, loading this >> 491 * value will invalidate the TLB entry. >> 492 */ >> 493 tlbwe r3, r3, PPC44x_TLB_PAGEID >> 494 isync >> 495 10: >> 496 #else /* !(CONFIG_40x || CONFIG_44x) */ >> 497 #if defined(CONFIG_SMP) >> 498 rlwinm r8,r1,0,0,18 >> 499 lwz r8,TI_CPU(r8) >> 500 oris r8,r8,11 >> 501 mfmsr r10 >> 502 SYNC >> 503 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ >> 504 rlwinm r0,r0,0,28,26 /* clear DR */ >> 505 mtmsr r0 >> 506 SYNC_601 >> 507 isync >> 508 lis r9,mmu_hash_lock@h >> 509 ori r9,r9,mmu_hash_lock@l >> 510 tophys(r9,r9) >> 511 10: lwarx r7,0,r9 >> 512 cmpi 0,r7,0 >> 513 bne- 10b >> 514 stwcx. r8,0,r9 >> 515 bne- 10b >> 516 eieio >> 517 tlbie r3 >> 518 sync >> 519 TLBSYNC >> 520 li r0,0 >> 521 stw r0,0(r9) /* clear mmu_hash_lock */ >> 522 mtmsr r10 >> 523 SYNC_601 >> 524 isync >> 525 #else /* CONFIG_SMP */ >> 526 tlbie r3 >> 527 sync >> 528 #endif /* CONFIG_SMP */ >> 529 #endif /* ! CONFIG_40x */ >> 530 blr >> 531 >> 532 /* >> 533 * Flush instruction cache. >> 534 * This is a no-op on the 601. 
>> 535 */ >> 536 _GLOBAL(flush_instruction_cache) >> 537 #if defined(CONFIG_8xx) >> 538 isync >> 539 lis r5, IDC_INVALL@h >> 540 mtspr IC_CST, r5 >> 541 #elif defined(CONFIG_4xx) >> 542 #ifdef CONFIG_403GCX >> 543 li r3, 512 >> 544 mtctr r3 >> 545 lis r4, KERNELBASE@h >> 546 1: iccci 0, r4 >> 547 addi r4, r4, 16 >> 548 bdnz 1b 53 #else 549 #else 54 mfcr r0 !! 550 lis r3, KERNELBASE@h 55 PPC_STL r0,3*SZL(r3) !! 551 iccci 0,r3 56 PPC_STL r13,4*SZL(r3) << 57 PPC_STL r14,5*SZL(r3) << 58 PPC_STL r15,6*SZL(r3) << 59 PPC_STL r16,7*SZL(r3) << 60 PPC_STL r17,8*SZL(r3) << 61 PPC_STL r18,9*SZL(r3) << 62 PPC_STL r19,10*SZL(r3) << 63 PPC_STL r20,11*SZL(r3) << 64 PPC_STL r21,12*SZL(r3) << 65 PPC_STL r22,13*SZL(r3) << 66 PPC_STL r23,14*SZL(r3) << 67 PPC_STL r24,15*SZL(r3) << 68 PPC_STL r25,16*SZL(r3) << 69 PPC_STL r26,17*SZL(r3) << 70 PPC_STL r27,18*SZL(r3) << 71 PPC_STL r28,19*SZL(r3) << 72 PPC_STL r29,20*SZL(r3) << 73 PPC_STL r30,21*SZL(r3) << 74 PPC_STL r31,22*SZL(r3) << 75 #endif 552 #endif 76 li r3,0 !! 553 #else >> 554 mfspr r3,PVR >> 555 rlwinm r3,r3,16,16,31 >> 556 cmpi 0,r3,1 >> 557 beqlr /* for 601, do nothing */ >> 558 /* 603/604 processor - use invalidate-all bit in HID0 */ >> 559 mfspr r3,HID0 >> 560 ori r3,r3,HID0_ICFI >> 561 mtspr HID0,r3 >> 562 #endif /* CONFIG_8xx/4xx */ >> 563 isync >> 564 blr >> 565 >> 566 /* >> 567 * Write any modified data cache blocks out to memory >> 568 * and invalidate the corresponding instruction cache blocks. >> 569 * This is a no-op on the 601. >> 570 * >> 571 * flush_icache_range(unsigned long start, unsigned long stop) >> 572 */ >> 573 _GLOBAL(flush_icache_range) >> 574 mfspr r5,PVR >> 575 rlwinm r5,r5,16,16,31 >> 576 cmpi 0,r5,1 >> 577 beqlr /* for 601, do nothing */ >> 578 li r5,L1_CACHE_LINE_SIZE-1 >> 579 andc r3,r3,r5 >> 580 subf r4,r3,r4 >> 581 add r4,r4,r5 >> 582 srwi. 
r4,r4,LG_L1_CACHE_LINE_SIZE >> 583 beqlr >> 584 mtctr r4 >> 585 mr r6,r3 >> 586 1: dcbst 0,r3 >> 587 addi r3,r3,L1_CACHE_LINE_SIZE >> 588 bdnz 1b >> 589 sync /* wait for dcbst's to get to ram */ >> 590 mtctr r4 >> 591 2: icbi 0,r6 >> 592 addi r6,r6,L1_CACHE_LINE_SIZE >> 593 bdnz 2b >> 594 sync /* additional sync needed on g4 */ >> 595 isync >> 596 blr >> 597 /* >> 598 * Write any modified data cache blocks out to memory. >> 599 * Does not invalidate the corresponding cache lines (especially for >> 600 * any corresponding instruction cache). >> 601 * >> 602 * clean_dcache_range(unsigned long start, unsigned long stop) >> 603 */ >> 604 _GLOBAL(clean_dcache_range) >> 605 li r5,L1_CACHE_LINE_SIZE-1 >> 606 andc r3,r3,r5 >> 607 subf r4,r3,r4 >> 608 add r4,r4,r5 >> 609 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE >> 610 beqlr >> 611 mtctr r4 >> 612 >> 613 1: dcbst 0,r3 >> 614 addi r3,r3,L1_CACHE_LINE_SIZE >> 615 bdnz 1b >> 616 sync /* wait for dcbst's to get to ram */ >> 617 blr >> 618 >> 619 /* >> 620 * Write any modified data cache blocks out to memory and invalidate them. >> 621 * Does not invalidate the corresponding instruction cache blocks. >> 622 * >> 623 * flush_dcache_range(unsigned long start, unsigned long stop) >> 624 */ >> 625 _GLOBAL(flush_dcache_range) >> 626 li r5,L1_CACHE_LINE_SIZE-1 >> 627 andc r3,r3,r5 >> 628 subf r4,r3,r4 >> 629 add r4,r4,r5 >> 630 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE >> 631 beqlr >> 632 mtctr r4 >> 633 >> 634 1: dcbf 0,r3 >> 635 addi r3,r3,L1_CACHE_LINE_SIZE >> 636 bdnz 1b >> 637 sync /* wait for dcbst's to get to ram */ >> 638 blr >> 639 >> 640 /* >> 641 * Like above, but invalidate the D-cache. This is used by the 8xx >> 642 * to invalidate the cache so the PPC core doesn't get stale data >> 643 * from the CPM (no cache snooping here :-). 
>> 644 * >> 645 * invalidate_dcache_range(unsigned long start, unsigned long stop) >> 646 */ >> 647 _GLOBAL(invalidate_dcache_range) >> 648 li r5,L1_CACHE_LINE_SIZE-1 >> 649 andc r3,r3,r5 >> 650 subf r4,r3,r4 >> 651 add r4,r4,r5 >> 652 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE >> 653 beqlr >> 654 mtctr r4 >> 655 >> 656 1: dcbi 0,r3 >> 657 addi r3,r3,L1_CACHE_LINE_SIZE >> 658 bdnz 1b >> 659 sync /* wait for dcbi's to get to ram */ >> 660 blr >> 661 >> 662 #ifdef CONFIG_NOT_COHERENT_CACHE >> 663 /* >> 664 * 40x cores have 8K or 16K dcache and 32 byte line size. >> 665 * 44x has a 32K dcache and 32 byte line size. >> 666 * 8xx has 1, 2, 4, 8K variants. >> 667 * For now, cover the worst case of the 44x. >> 668 * Must be called with external interrupts disabled. >> 669 */ >> 670 #define CACHE_NWAYS 64 >> 671 #define CACHE_NLINES 16 >> 672 >> 673 _GLOBAL(flush_dcache_all) >> 674 li r4, (2 * CACHE_NWAYS * CACHE_NLINES) >> 675 mtctr r4 >> 676 lis r5, KERNELBASE@h >> 677 1: lwz r3, 0(r5) /* Load one word from every line */ >> 678 addi r5, r5, L1_CACHE_LINE_SIZE >> 679 bdnz 1b 77 blr 680 blr >> 681 #endif /* CONFIG_NOT_COHERENT_CACHE */ 78 682 79 _GLOBAL(longjmp) !! 683 /* 80 #ifdef CONFIG_PPC32 !! 684 * Flush a particular page from the data cache to RAM. 81 lmw r12, 3*SZL(r3) !! 685 * Note: this is necessary because the instruction cache does *not* 82 mtcrf 0x38, r12 !! 686 * snoop from the data cache. >> 687 * This is a no-op on the 601 which has a unified cache. 
>> 688 * >> 689 * void __flush_dcache_icache(void *page) >> 690 */ >> 691 _GLOBAL(__flush_dcache_icache) >> 692 mfspr r5,PVR >> 693 rlwinm r5,r5,16,16,31 >> 694 cmpi 0,r5,1 >> 695 beqlr /* for 601, do nothing */ >> 696 rlwinm r3,r3,0,0,19 /* Get page base address */ >> 697 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ >> 698 mtctr r4 >> 699 mr r6,r3 >> 700 0: dcbst 0,r3 /* Write line to ram */ >> 701 addi r3,r3,L1_CACHE_LINE_SIZE >> 702 bdnz 0b >> 703 sync >> 704 mtctr r4 >> 705 1: icbi 0,r6 >> 706 addi r6,r6,L1_CACHE_LINE_SIZE >> 707 bdnz 1b >> 708 sync >> 709 isync >> 710 blr >> 711 >> 712 /* >> 713 * Flush a particular page from the data cache to RAM, identified >> 714 * by its physical address. We turn off the MMU so we can just use >> 715 * the physical address (this may be a highmem page without a kernel >> 716 * mapping). >> 717 * >> 718 * void __flush_dcache_icache_phys(unsigned long physaddr) >> 719 */ >> 720 _GLOBAL(__flush_dcache_icache_phys) >> 721 mfspr r5,PVR >> 722 rlwinm r5,r5,16,16,31 >> 723 cmpi 0,r5,1 >> 724 beqlr /* for 601, do nothing */ >> 725 mfmsr r10 >> 726 rlwinm r0,r10,0,28,26 /* clear DR */ >> 727 mtmsr r0 >> 728 isync >> 729 rlwinm r3,r3,0,0,19 /* Get page base address */ >> 730 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ >> 731 mtctr r4 >> 732 mr r6,r3 >> 733 0: dcbst 0,r3 /* Write line to ram */ >> 734 addi r3,r3,L1_CACHE_LINE_SIZE >> 735 bdnz 0b >> 736 sync >> 737 mtctr r4 >> 738 1: icbi 0,r6 >> 739 addi r6,r6,L1_CACHE_LINE_SIZE >> 740 bdnz 1b >> 741 sync >> 742 mtmsr r10 /* restore DR */ >> 743 isync >> 744 blr >> 745 >> 746 /* >> 747 * Clear a page using the dcbz instruction, which doesn't cause any >> 748 * memory traffic (except to write out any cache lines which get >> 749 * displaced). This only works on cacheable memory. 
>> 750 */ >> 751 _GLOBAL(clear_page) >> 752 li r0,4096/L1_CACHE_LINE_SIZE >> 753 mtctr r0 >> 754 #ifdef CONFIG_8xx >> 755 li r4, 0 >> 756 1: stw r4, 0(r3) >> 757 stw r4, 4(r3) >> 758 stw r4, 8(r3) >> 759 stw r4, 12(r3) 83 #else 760 #else 84 PPC_LL r13,4*SZL(r3) !! 761 1: dcbz 0,r3 85 PPC_LL r14,5*SZL(r3) !! 762 #endif 86 PPC_LL r15,6*SZL(r3) !! 763 addi r3,r3,L1_CACHE_LINE_SIZE 87 PPC_LL r16,7*SZL(r3) !! 764 bdnz 1b 88 PPC_LL r17,8*SZL(r3) << 89 PPC_LL r18,9*SZL(r3) << 90 PPC_LL r19,10*SZL(r3) << 91 PPC_LL r20,11*SZL(r3) << 92 PPC_LL r21,12*SZL(r3) << 93 PPC_LL r22,13*SZL(r3) << 94 PPC_LL r23,14*SZL(r3) << 95 PPC_LL r24,15*SZL(r3) << 96 PPC_LL r25,16*SZL(r3) << 97 PPC_LL r26,17*SZL(r3) << 98 PPC_LL r27,18*SZL(r3) << 99 PPC_LL r28,19*SZL(r3) << 100 PPC_LL r29,20*SZL(r3) << 101 PPC_LL r30,21*SZL(r3) << 102 PPC_LL r31,22*SZL(r3) << 103 PPC_LL r0,3*SZL(r3) << 104 mtcrf 0x38,r0 << 105 #endif << 106 PPC_LL r0,0(r3) << 107 PPC_LL r1,SZL(r3) << 108 PPC_LL r2,2*SZL(r3) << 109 mtlr r0 << 110 mr. r3, r4 << 111 bnelr << 112 li r3, 1 << 113 blr 765 blr 114 766 115 _GLOBAL(current_stack_frame) !! 767 /* 116 PPC_LL r3,0(r1) !! 768 * Copy a whole page. We use the dcbz instruction on the destination >> 769 * to reduce memory traffic (it eliminates the unnecessary reads of >> 770 * the destination into cache). This requires that the destination >> 771 * is cacheable. 
>> 772 */ >> 773 #define COPY_16_BYTES \ >> 774 lwz r6,4(r4); \ >> 775 lwz r7,8(r4); \ >> 776 lwz r8,12(r4); \ >> 777 lwzu r9,16(r4); \ >> 778 stw r6,4(r3); \ >> 779 stw r7,8(r3); \ >> 780 stw r8,12(r3); \ >> 781 stwu r9,16(r3) >> 782 >> 783 _GLOBAL(copy_page) >> 784 addi r3,r3,-4 >> 785 addi r4,r4,-4 >> 786 li r5,4 >> 787 >> 788 #ifndef CONFIG_8xx >> 789 #if MAX_COPY_PREFETCH > 1 >> 790 li r0,MAX_COPY_PREFETCH >> 791 li r11,4 >> 792 mtctr r0 >> 793 11: dcbt r11,r4 >> 794 addi r11,r11,L1_CACHE_LINE_SIZE >> 795 bdnz 11b >> 796 #else /* MAX_L1_COPY_PREFETCH == 1 */ >> 797 dcbt r5,r4 >> 798 li r11,L1_CACHE_LINE_SIZE+4 >> 799 #endif /* MAX_L1_COPY_PREFETCH */ >> 800 #endif /* CONFIG_8xx */ >> 801 >> 802 li r0,4096/L1_CACHE_LINE_SIZE >> 803 mtctr r0 >> 804 1: >> 805 #ifndef CONFIG_8xx >> 806 dcbt r11,r4 >> 807 dcbz r5,r3 >> 808 #endif >> 809 COPY_16_BYTES >> 810 #if L1_CACHE_LINE_SIZE >= 32 >> 811 COPY_16_BYTES >> 812 #if L1_CACHE_LINE_SIZE >= 64 >> 813 COPY_16_BYTES >> 814 COPY_16_BYTES >> 815 #if L1_CACHE_LINE_SIZE >= 128 >> 816 COPY_16_BYTES >> 817 COPY_16_BYTES >> 818 COPY_16_BYTES >> 819 COPY_16_BYTES >> 820 #endif >> 821 #endif >> 822 #endif >> 823 bdnz 1b >> 824 blr >> 825 >> 826 /* >> 827 * void atomic_clear_mask(atomic_t mask, atomic_t *addr) >> 828 * void atomic_set_mask(atomic_t mask, atomic_t *addr); >> 829 */ >> 830 _GLOBAL(atomic_clear_mask) >> 831 10: lwarx r5,0,r4 >> 832 andc r5,r5,r3 >> 833 PPC405_ERR77(0,r4) >> 834 stwcx. r5,0,r4 >> 835 bne- 10b >> 836 blr >> 837 _GLOBAL(atomic_set_mask) >> 838 10: lwarx r5,0,r4 >> 839 or r5,r5,r3 >> 840 PPC405_ERR77(0,r4) >> 841 stwcx. 
r5,0,r4 >> 842 bne- 10b >> 843 blr >> 844 >> 845 /* >> 846 * I/O string operations >> 847 * >> 848 * insb(port, buf, len) >> 849 * outsb(port, buf, len) >> 850 * insw(port, buf, len) >> 851 * outsw(port, buf, len) >> 852 * insl(port, buf, len) >> 853 * outsl(port, buf, len) >> 854 * insw_ns(port, buf, len) >> 855 * outsw_ns(port, buf, len) >> 856 * insl_ns(port, buf, len) >> 857 * outsl_ns(port, buf, len) >> 858 * >> 859 * The *_ns versions don't do byte-swapping. >> 860 */ >> 861 _GLOBAL(_insb) >> 862 cmpwi 0,r5,0 >> 863 mtctr r5 >> 864 subi r4,r4,1 >> 865 blelr- >> 866 00: lbz r5,0(r3) >> 867 eieio >> 868 stbu r5,1(r4) >> 869 bdnz 00b >> 870 blr >> 871 >> 872 _GLOBAL(_outsb) >> 873 cmpwi 0,r5,0 >> 874 mtctr r5 >> 875 subi r4,r4,1 >> 876 blelr- >> 877 00: lbzu r5,1(r4) >> 878 stb r5,0(r3) >> 879 eieio >> 880 bdnz 00b >> 881 blr >> 882 >> 883 _GLOBAL(_insw) >> 884 cmpwi 0,r5,0 >> 885 mtctr r5 >> 886 subi r4,r4,2 >> 887 blelr- >> 888 00: lhbrx r5,0,r3 >> 889 eieio >> 890 sthu r5,2(r4) >> 891 bdnz 00b >> 892 blr >> 893 >> 894 _GLOBAL(_outsw) >> 895 cmpwi 0,r5,0 >> 896 mtctr r5 >> 897 subi r4,r4,2 >> 898 blelr- >> 899 00: lhzu r5,2(r4) >> 900 eieio >> 901 sthbrx r5,0,r3 >> 902 bdnz 00b >> 903 blr >> 904 >> 905 _GLOBAL(_insl) >> 906 cmpwi 0,r5,0 >> 907 mtctr r5 >> 908 subi r4,r4,4 >> 909 blelr- >> 910 00: lwbrx r5,0,r3 >> 911 eieio >> 912 stwu r5,4(r4) >> 913 bdnz 00b 117 blr 914 blr 118 EXPORT_SYMBOL(current_stack_frame) !! 
915 >> 916 _GLOBAL(_outsl) >> 917 cmpwi 0,r5,0 >> 918 mtctr r5 >> 919 subi r4,r4,4 >> 920 blelr- >> 921 00: lwzu r5,4(r4) >> 922 stwbrx r5,0,r3 >> 923 eieio >> 924 bdnz 00b >> 925 blr >> 926 >> 927 _GLOBAL(__ide_mm_insw) >> 928 _GLOBAL(_insw_ns) >> 929 cmpwi 0,r5,0 >> 930 mtctr r5 >> 931 subi r4,r4,2 >> 932 blelr- >> 933 00: lhz r5,0(r3) >> 934 eieio >> 935 sthu r5,2(r4) >> 936 bdnz 00b >> 937 blr >> 938 >> 939 _GLOBAL(__ide_mm_outsw) >> 940 _GLOBAL(_outsw_ns) >> 941 cmpwi 0,r5,0 >> 942 mtctr r5 >> 943 subi r4,r4,2 >> 944 blelr- >> 945 00: lhzu r5,2(r4) >> 946 sth r5,0(r3) >> 947 eieio >> 948 bdnz 00b >> 949 blr >> 950 >> 951 _GLOBAL(__ide_mm_insl) >> 952 _GLOBAL(_insl_ns) >> 953 cmpwi 0,r5,0 >> 954 mtctr r5 >> 955 subi r4,r4,4 >> 956 blelr- >> 957 00: lwz r5,0(r3) >> 958 eieio >> 959 stwu r5,4(r4) >> 960 bdnz 00b >> 961 blr >> 962 >> 963 _GLOBAL(__ide_mm_outsl) >> 964 _GLOBAL(_outsl_ns) >> 965 cmpwi 0,r5,0 >> 966 mtctr r5 >> 967 subi r4,r4,4 >> 968 blelr- >> 969 00: lwzu r5,4(r4) >> 970 stw r5,0(r3) >> 971 eieio >> 972 bdnz 00b >> 973 blr >> 974 >> 975 /* >> 976 * Extended precision shifts. >> 977 * >> 978 * Updated to be valid for shift counts from 0 to 63 inclusive. >> 979 * -- Gabriel >> 980 * >> 981 * R3/R4 has 64 bit value >> 982 * R5 has shift count >> 983 * result in R3/R4 >> 984 * >> 985 * ashrdi3: arithmetic right shift (sign propagation) >> 986 * lshrdi3: logical right shift >> 987 * ashldi3: left shift >> 988 */ >> 989 _GLOBAL(__ashrdi3) >> 990 subfic r6,r5,32 >> 991 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count >> 992 addi r7,r5,32 # could be xori, or addi with -32 >> 993 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) >> 994 rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0 >> 995 sraw r7,r3,r7 # t2 = MSW >> (count-32) >> 996 or r4,r4,r6 # LSW |= t1 >> 997 slw r7,r7,r8 # t2 = (count < 32) ? 
0 : t2 >> 998 sraw r3,r3,r5 # MSW = MSW >> count >> 999 or r4,r4,r7 # LSW |= t2 >> 1000 blr >> 1001 >> 1002 _GLOBAL(__ashldi3) >> 1003 subfic r6,r5,32 >> 1004 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count >> 1005 addi r7,r5,32 # could be xori, or addi with -32 >> 1006 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count) >> 1007 slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32) >> 1008 or r3,r3,r6 # MSW |= t1 >> 1009 slw r4,r4,r5 # LSW = LSW << count >> 1010 or r3,r3,r7 # MSW |= t2 >> 1011 blr >> 1012 >> 1013 _GLOBAL(__lshrdi3) >> 1014 subfic r6,r5,32 >> 1015 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count >> 1016 addi r7,r5,32 # could be xori, or addi with -32 >> 1017 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count) >> 1018 srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32) >> 1019 or r4,r4,r6 # LSW |= t1 >> 1020 srw r3,r3,r5 # MSW = MSW >> count >> 1021 or r4,r4,r7 # LSW |= t2 >> 1022 blr >> 1023 >> 1024 _GLOBAL(abs) >> 1025 srawi r4,r3,31 >> 1026 xor r3,r3,r4 >> 1027 sub r3,r3,r4 >> 1028 blr >> 1029 >> 1030 _GLOBAL(_get_SP) >> 1031 mr r3,r1 /* Close enough */ >> 1032 blr >> 1033 >> 1034 /* >> 1035 * These are used in the alignment trap handler when emulating >> 1036 * single-precision loads and stores. >> 1037 * We restore and save the fpscr so the task gets the same result >> 1038 * and exceptions as if the cpu had performed the load or store. 
>> 1039 */ >> 1040 >> 1041 #if defined(CONFIG_4xx) >> 1042 _GLOBAL(cvt_fd) >> 1043 lfs 0,0(r3) >> 1044 stfd 0,0(r4) >> 1045 blr >> 1046 >> 1047 _GLOBAL(cvt_df) >> 1048 lfd 0,0(r3) >> 1049 stfs 0,0(r4) >> 1050 blr >> 1051 #else >> 1052 _GLOBAL(cvt_fd) >> 1053 lfd 0,-4(r5) /* load up fpscr value */ >> 1054 mtfsf 0xff,0 >> 1055 lfs 0,0(r3) >> 1056 stfd 0,0(r4) >> 1057 mffs 0 /* save new fpscr value */ >> 1058 stfd 0,-4(r5) >> 1059 blr >> 1060 >> 1061 _GLOBAL(cvt_df) >> 1062 lfd 0,-4(r5) /* load up fpscr value */ >> 1063 mtfsf 0xff,0 >> 1064 lfd 0,0(r3) >> 1065 stfs 0,0(r4) >> 1066 mffs 0 /* save new fpscr value */ >> 1067 stfd 0,-4(r5) >> 1068 blr >> 1069 #endif >> 1070 >> 1071 /* >> 1072 * Create a kernel thread >> 1073 * kernel_thread(fn, arg, flags) >> 1074 */ >> 1075 _GLOBAL(kernel_thread) >> 1076 stwu r1,-16(r1) >> 1077 stw r30,8(r1) >> 1078 stw r31,12(r1) >> 1079 mr r30,r3 /* function */ >> 1080 mr r31,r4 /* argument */ >> 1081 ori r3,r5,CLONE_VM /* flags */ >> 1082 oris r3,r3,CLONE_UNTRACED>>16 >> 1083 li r4,0 /* new sp (unused) */ >> 1084 li r0,__NR_clone >> 1085 sc >> 1086 cmpi 0,r3,0 /* parent or child? */ >> 1087 bne 1f /* return if parent */ >> 1088 li r0,0 /* make top-level stack frame */ >> 1089 stwu r0,-16(r1) >> 1090 mtlr r30 /* fn addr in lr */ >> 1091 mr r3,r31 /* load arg and call fn */ >> 1092 blrl >> 1093 li r0,__NR_exit /* exit if function returns */ >> 1094 li r3,0 >> 1095 sc >> 1096 1: lwz r30,8(r1) >> 1097 lwz r31,12(r1) >> 1098 addi r1,r1,16 >> 1099 blr >> 1100 >> 1101 /* >> 1102 * This routine is just here to keep GCC happy - sigh... 
/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

/*
 * Minimal libc-style syscall stubs for early-boot/standalone callers:
 * on failure (summary-overflow set by the kernel) the error code in r3
 * is stored into 'errno' and -1 is returned, matching C library
 * conventions.
 */
#define SYSCALL(name) \
_GLOBAL(name) \
	li	r0,__NR_##name; \
	sc; \
	bnslr; \
	lis	r4,errno@ha; \
	stw	r3,errno@l(r4); \
	li	r3,-1; \
	blr

#define __NR__exit __NR_exit

SYSCALL(setsid)
SYSCALL(open)
SYSCALL(read)
SYSCALL(write)
SYSCALL(lseek)
SYSCALL(close)
SYSCALL(dup)
SYSCALL(execve)
SYSCALL(waitpid)

/*
 * System call jump table, indexed by syscall number.  Entry order is
 * kernel ABI: never reorder or delete entries; retired numbers become
 * sys_ni_syscall placeholders.
 */
/* Why isn't this a) automatic, b) written in 'C'? */
	.data
	.align 4
_GLOBAL(sys_call_table)
	.long sys_restart_syscall /* 0 */
	.long sys_exit
	.long ppc_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid
	.long sys_getuid
	.long sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */	/* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid
	.long sys_getgid
	.long sys_signal
	.long sys_geteuid
	.long sys_getegid	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys() */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_olduname
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid	/* 70 */
	.long sys_setregid
	.long ppc_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups	/* 80 */
	.long sys_setgroups
	.long ppc_select
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long sys_mmap		/* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_ni_syscall	/* 110 */
	.long sys_vhangup
	.long sys_ni_syscall	/* old 'idle' syscall */
	.long sys_ni_syscall
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long ppc_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old sys_create_module */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* old sys_get_kernel_syms */	/* 130 */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid
	.long sys_setfsgid
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long ppc_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min  /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid
	.long sys_getresuid	/* 165 */
	.long sys_ni_syscall	/* old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid
	.long sys_getresgid	/* 170 */
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask
	.long sys_rt_sigpending	/* 175 */
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long ppc_rt_sigsuspend
	.long sys_pread64
	.long sys_pwrite64	/* 180 */
	.long sys_chown
	.long sys_getcwd
	.long sys_capget
	.long sys_capset
	.long sys_sigaltstack	/* 185 */
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long ppc_vfork
	.long sys_getrlimit	/* 190 */
	.long sys_readahead
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_pciconfig_read
	.long sys_pciconfig_write
	.long sys_pciconfig_iobase	/* 200 */
	.long sys_ni_syscall	/* 201 - reserved - MacOnLinux - new */
	.long sys_getdents64
	.long sys_pivot_root
	.long sys_fcntl64
	.long sys_madvise	/* 205 */
	.long sys_mincore
	.long sys_gettid
	.long sys_tkill
	.long sys_setxattr
	.long sys_lsetxattr	/* 210 */
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr
	.long sys_fgetxattr
	.long sys_listxattr	/* 215 */
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr
	.long sys_lremovexattr
	.long sys_fremovexattr	/* 220 */
	.long sys_futex
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall
	.long sys_ni_syscall	/* 225 - reserved for Tux */
	.long sys_sendfile64
	.long sys_io_setup
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit	/* 230 */
	.long sys_io_cancel
	.long sys_set_tid_address
	.long sys_fadvise64
	.long sys_exit_group
	.long sys_lookup_dcookie /* 235 */
	.long sys_epoll_create
	.long sys_epoll_ctl
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_timer_create	/* 240 */
	.long sys_timer_settime
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime	/* 245 */
	.long sys_clock_gettime
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_swapcontext
	.long sys_tgkill	/* 250 */
	.long sys_utimes
	.long sys_statfs64
	.long sys_fstatfs64
	.long ppc_fadvise64_64
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.