arch/sparc/lib/strlen.S:

/* SPDX-License-Identifier: GPL-2.0 */
/* strlen.S: Sparc optimized strlen code
 * Hand optimized from GNU libc's strlen
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asm.h>

#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080

	.text
ENTRY(strlen)
	mov	%o0, %o1
	andcc	%o0, 3, %g0
	BRANCH32(be, pt, 9f)
	 sethi	%hi(HI_MAGIC), %o4
	ldub	[%o0], %o5
	BRANCH_REG_ZERO(pn, %o5, 11f)
	 add	%o0, 1, %o0
	andcc	%o0, 3, %g0
	BRANCH32(be, pn, 4f)
	 or	%o4, %lo(HI_MAGIC), %o3
	ldub	[%o0], %o5
	BRANCH_REG_ZERO(pn, %o5, 12f)
	 add	%o0, 1, %o0
	andcc	%o0, 3, %g0
	BRANCH32(be, pt, 5f)
	 sethi	%hi(LO_MAGIC), %o4
	ldub	[%o0], %o5
	BRANCH_REG_ZERO(pn, %o5, 13f)
	 add	%o0, 1, %o0
	BRANCH32(ba, pt, 8f)
	 or	%o4, %lo(LO_MAGIC), %o2
9:
	or	%o4, %lo(HI_MAGIC), %o3
4:
	sethi	%hi(LO_MAGIC), %o4
5:
	or	%o4, %lo(LO_MAGIC), %o2
8:
	ld	[%o0], %o5
2:
	sub	%o5, %o2, %o4
	andcc	%o4, %o3, %g0
	BRANCH32(be, pt, 8b)
	 add	%o0, 4, %o0

	/* Check every byte. */
	srl	%o5, 24, %g7
	andcc	%g7, 0xff, %g0
	BRANCH32(be, pn, 1f)
	 add	%o0, -4, %o4
	srl	%o5, 16, %g7
	andcc	%g7, 0xff, %g0
	BRANCH32(be, pn, 1f)
	 add	%o4, 1, %o4
	srl	%o5, 8, %g7
	andcc	%g7, 0xff, %g0
	BRANCH32(be, pn, 1f)
	 add	%o4, 1, %o4
	andcc	%o5, 0xff, %g0
	BRANCH32_ANNUL(bne, pt, 2b)
	 ld	[%o0], %o5
	add	%o4, 1, %o4
1:
	retl
	 sub	%o4, %o1, %o0
11:
	retl
	 mov	0, %o0
12:
	retl
	 mov	1, %o0
13:
	retl
	 mov	2, %o0
ENDPROC(strlen)
EXPORT_SYMBOL(strlen)

arch/alpha/lib/strlen.S:

/*
 * strlen.S (c) 1995 David Mosberger (davidm@cs.arizona.edu)
 *
 * Finds length of a 0-terminated string.  Optimized for the
 * Alpha architecture:
 *
 *	- memory accessed as aligned quadwords only
 *	- uses bcmpge to compare 8 bytes in parallel
 *	- does binary search to find 0 byte in last
 *	  quadword (HAKMEM needed 12 instructions to
 *	  do this instead of the 9 instructions that
 *	  binary search needs).
 */

	.set noreorder
	.set noat

	.align 3

	.globl	strlen
	.ent	strlen

strlen:
	ldq_u	$1, 0($16)	# load first quadword ($16 may be misaligned)
	lda	$2, -1($31)
	insqh	$2, $16, $2
	andnot	$16, 7, $0
	or	$2, $1, $1
	cmpbge	$31, $1, $2	# $2 <- bitmask: bit i == 1 <==> i-th byte == 0
	bne	$2, found

loop:	ldq	$1, 8($0)
	addq	$0, 8, $0	# addr += 8
	nop			# helps dual issue last two insns
	cmpbge	$31, $1, $2
	beq	$2, loop

found:	blbs	$2, done	# make aligned case fast
	negq	$2, $3
	and	$2, $3, $2

	and	$2, 0x0f, $1
	addq	$0, 4, $3
	cmoveq	$1, $3, $0

	and	$2, 0x33, $1
	addq	$0, 2, $3
	cmoveq	$1, $3, $0

	and	$2, 0x55, $1
	addq	$0, 1, $3
	cmoveq	$1, $3, $0

done:	subq	$0, $16, $0
	ret	$31, ($26)

	.end	strlen
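The two implementations find a candidate word in different ways. The Sparc code uses the classic magic-constant test: (word - LO_MAGIC) & HI_MAGIC, with LO_MAGIC = 0x01010101 and HI_MAGIC = 0x80808080, is guaranteed to be nonzero whenever the 32-bit word contains a zero byte, but it can also fire for bytes >= 0x80, which is why the code then falls into the "Check every byte" block. The Alpha code instead gets an exact per-byte zero mask directly from cmpbge and locates the lowest set bit of that mask with a cmoveq binary search. The following is a minimal C sketch of the Sparc technique only; it is not taken from the kernel tree, and the function name strlen_sketch and its structure are illustrative assumptions.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define LO_MAGIC 0x01010101u
#define HI_MAGIC 0x80808080u

/* Illustrative sketch only -- not the kernel implementation. */
static size_t strlen_sketch(const char *s)
{
	const char *p = s;

	/* Unaligned prefix, one byte at a time (labels 11/12/13 above). */
	while ((uintptr_t)p & 3) {
		if (*p == '\0')
			return p - s;
		p++;
	}

	for (;;) {
		uint32_t w;

		memcpy(&w, p, sizeof(w));	/* one aligned 32-bit load */

		/*
		 * Nonzero whenever w contains a zero byte; may also be
		 * nonzero for bytes >= 0x80, hence the per-byte recheck
		 * (the "Check every byte" block in the assembly).
		 */
		if ((w - LO_MAGIC) & HI_MAGIC) {
			size_t i;

			for (i = 0; i < 4; i++)
				if (p[i] == '\0')
					return (size_t)(p - s) + i;
		}
		p += 4;
	}
}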