/* SPDX-License-Identifier: GPL-2.0 */
/* linux/arch/sparc/lib/memset.S: Sparc optimized memset, bzero and clear_user code
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 *
 * Calls to memset return the initial %o0. Calls to bzero return 0 if ok;
 * if an exception occurs and we were called as clear_user, they return
 * the number of bytes not yet set.
 */
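/*
 * For orientation, a rough C sketch of the contract described above.
 * The name memset_sketch is hypothetical and for illustration only;
 * the real entry points are memset and __bzero below.
 *
 *	void *memset_sketch(void *dst, int c, unsigned long n)
 *	{
 *		unsigned char *p = dst;
 *
 *		while (n--)
 *			*p++ = (unsigned char)c;
 *		return dst;		// always the initial %o0
 *	}
 *
 * __bzero instead returns 0 on success. When a store faults (possible
 * only for the clear_user entry), the .fixup code computes how many
 * bytes were left unset and returns that count in %o0 instead.
 */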
#include <linux/export.h>
#include <asm/ptrace.h>

/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b) 				\
98: 	x,y;					\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	retl;					\
	 a, b, %o0;				\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 99b;			\
	.text;					\
	.align	4

#define STORE(source, base, offset, n)		\
98:	std source, [base + offset + n];	\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	ba 30f;					\
	 sub %o3, n - offset, %o3;		\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 99b;			\
	.text;					\
	.align	4;

#define STORE_LAST(source, base, offset, n)	\
	EX(std source, [base - offset - n],	\
	   add %o1, offset + n);
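/*
 * To make the fixup plumbing concrete: a use such as
 * EX(stb %g3, [%o0], sub %o1, 0) from the code below expands
 * (approximately, after ALLOC/EXECINSTR substitution) to:
 *
 *	98:	stb %g3,[%o0];
 *		.section .fixup,#alloc,#execinstr;
 *		.align	4;
 *	99:	retl;
 *		 sub %o1, 0, %o0;
 *		.section __ex_table,#alloc;
 *		.align	4;
 *		.word	98b, 99b;
 *		.text;
 *		.align	4
 *
 * i.e. each guarded store gets an __ex_table entry mapping its address
 * (98b) to a fixup stub (99b) that computes the "bytes not yet set"
 * return value in %o0 and returns.
 */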
/* Please don't change these macros, unless you change the logic
 * in the .fixup section below as well.
 * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
#define ZERO_BIG_BLOCK(base, offset, source)	\
	STORE(source, base, offset, 0x00);	\
	STORE(source, base, offset, 0x08);	\
	STORE(source, base, offset, 0x10);	\
	STORE(source, base, offset, 0x18);	\
	STORE(source, base, offset, 0x20);	\
	STORE(source, base, offset, 0x28);	\
	STORE(source, base, offset, 0x30);	\
	STORE(source, base, offset, 0x38);

#define ZERO_LAST_BLOCKS(base, offset, source)	\
	STORE_LAST(source, base, offset, 0x38);	\
	STORE_LAST(source, base, offset, 0x30);	\
	STORE_LAST(source, base, offset, 0x28);	\
	STORE_LAST(source, base, offset, 0x20);	\
	STORE_LAST(source, base, offset, 0x18);	\
	STORE_LAST(source, base, offset, 0x10);	\
	STORE_LAST(source, base, offset, 0x08);	\
	STORE_LAST(source, base, offset, 0x00);

	.text
	.align 4

	.globl	__bzero_begin
__bzero_begin:

	.globl	__bzero
	.type	__bzero,#function
	.globl	memset
	EXPORT_SYMBOL(__bzero)
	EXPORT_SYMBOL(memset)
memset:
	mov	%o0, %g1	! save original dst for the return value
	mov	1, %g4		! %g4 != 0: entered via memset
	and	%o1, 0xff, %g3	! replicate the low byte of c ...
	sll	%g3, 8, %g2
	or	%g3, %g2, %g3
	sll	%g3, 16, %g2
	or	%g3, %g2, %g3	! ... into all four bytes of %g3
	b	1f
	 mov	%o2, %o1
3:
	cmp	%o2, 3
	be	2f
	 EX(stb	%g3, [%o0], sub %o1, 0)

	cmp	%o2, 2
	be	2f
	 EX(stb	%g3, [%o0 + 0x01], sub %o1, 1)

	EX(stb	%g3, [%o0 + 0x02], sub %o1, 2)
2:
	sub	%o2, 4, %o2
	add	%o1, %o2, %o1
	b	4f
	 sub	%o0, %o2, %o0

__bzero:
	clr	%g4		! %g4 == 0: entered via bzero/clear_user
	mov	%g0, %g3
1:
	cmp	%o1, 7
	bleu	7f
	 andcc	%o0, 3, %o2

	bne	3b
4:
	 andcc	%o0, 4, %g0

	be	2f
	 mov	%g3, %g2

	EX(st	%g3, [%o0], sub %o1, 0)
	sub	%o1, 4, %o1
	add	%o0, 4, %o0
2:
	andcc	%o1, 0xffffff80, %o3	! now 8-byte aligned; %o1 is the length left to run
	be	9f
	 andcc	%o1, 0x78, %o2
10:
	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
	subcc	%o3, 128, %o3
	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
	bne	10b
	 add	%o0, 128, %o0

	orcc	%o2, %g0, %g0
9:
	be	13f
	 andcc	%o1, 7, %o1

	srl	%o2, 1, %o3	! each 4-byte std insn clears 8 bytes: offset = len/2
	set	13f, %o4
	sub	%o4, %o3, %o4
	jmp	%o4
	 add	%o0, %o2, %o0	! point %o0 past the tail; STORE_LAST offsets are negative

	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
	be	8f
	 andcc	%o1, 4, %g0

	be	1f
	 andcc	%o1, 2, %g0

	EX(st	%g3, [%o0], and %o1, 7)
	add	%o0, 4, %o0
1:
	be	1f
	 andcc	%o1, 1, %g0

	EX(sth	%g3, [%o0], and %o1, 3)
	add	%o0, 2, %o0
1:
	bne,a	8f
	 EX(stb	%g3, [%o0], and %o1, 1)
8:
	b	0f
	 nop
7:
	be	13b
	 orcc	%o1, 0, %g0

	be	0f
8:
	 add	%o0, 1, %o0
	subcc	%o1, 1, %o1
	bne	8b
	 EX(stb	%g3, [%o0 - 1], add %o1, 1)
0:
	andcc	%g4, 1, %g0	! memset returns dst, bzero/clear_user return 0
	be	5f
	 nop
	retl
	 mov	%g1, %o0
5:
	retl
	 clr	%o0

	.section .fixup,#alloc,#execinstr
	.align	4
30:
	and	%o1, 0x7f, %o1
	retl
	 add	%o3, %o1, %o0

	.globl	__bzero_end
__bzero_end:
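/*
 * A loose C analogue of the flow above (hypothetical name, illustration
 * only; it omits the return-value and fault-fixup details): align the
 * pointer, clear 128-byte blocks with the unrolled loop, finish the
 * 8-byte-multiple remainder, then mop up the last word/halfword/byte.
 *
 *	static void bzero_sketch(unsigned char *p, unsigned long n)
 *	{
 *		if (n < 8)
 *			goto tail;			// small: byte loop at 7:/8:
 *		while ((unsigned long)p & 7) {		// align to 8 bytes
 *			*p++ = 0;
 *			n--;
 *		}
 *		while (n >= 128) {			// the 10b: loop
 *			for (int i = 0; i < 16; i++)
 *				((unsigned long long *)p)[i] = 0;
 *			p += 128;
 *			n -= 128;
 *		}
 *		while (n >= 8) {			// ZERO_LAST_BLOCKS tail
 *			*(unsigned long long *)p = 0;
 *			p += 8;
 *			n -= 8;
 *		}
 *	tail:
 *		while (n--)				// stores after 13:
 *			*p++ = 0;
 *	}
 *
 * The assembly replaces the "while (n >= 8)" loop with a computed jmp
 * into the ZERO_LAST_BLOCKS stores (a Duff's-device-style entry), so
 * the tail runs without a branch per store.
 */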