/* SPDX-License-Identifier: GPL-2.0 */
/* linux/arch/sparc/lib/memset.S: Sparc optimized memset, bzero and clear_user code
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 *
 * Calls to memset returns initial %o0. Calls to bzero returns 0, if ok, and
 * number of bytes not yet set if exception occurs and we were called as
 * clear_user.
 */

#include <linux/export.h>
#include <asm/ptrace.h>

/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b)				\
98: 	x,y;					\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	retl;					\
	 a, b, %o0;				\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 99b;			\
	.text;					\
	.align	4

#define STORE(source, base, offset, n)		\
98:	std source, [base + offset + n];	\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	ba 30f;					\
	 sub %o3, n - offset, %o3;		\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	98b, 99b;			\
	.text;					\
	.align	4;

#define STORE_LAST(source, base, offset, n)	\
	EX(std source, [base - offset - n],	\
	   add %o1, offset + n);

/* Please don't change these macros, unless you change the logic
 * in the .fixup section below as well.
 * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
#define ZERO_BIG_BLOCK(base, offset, source)	\
	STORE(source, base, offset, 0x00);	\
	STORE(source, base, offset, 0x08);	\
	STORE(source, base, offset, 0x10);	\
	STORE(source, base, offset, 0x18);	\
	STORE(source, base, offset, 0x20);	\
	STORE(source, base, offset, 0x28);	\
	STORE(source, base, offset, 0x30);	\
	STORE(source, base, offset, 0x38);

#define ZERO_LAST_BLOCKS(base, offset, source)	\
	STORE_LAST(source, base, offset, 0x38);	\
	STORE_LAST(source, base, offset, 0x30);	\
	STORE_LAST(source, base, offset, 0x28);	\
	STORE_LAST(source, base, offset, 0x20);	\
	STORE_LAST(source, base, offset, 0x18);	\
	STORE_LAST(source, base, offset, 0x10);	\
	STORE_LAST(source, base, offset, 0x08);	\
	STORE_LAST(source, base, offset, 0x00);
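
/* Rough sketch of the fault-handling scheme used by the macros above:
 * each EX() emits its store at local label 98 and a fixup stub at 99 in
 * the .fixup section, tied together by a __ex_table entry.  If the store
 * faults while we run as clear_user, the trap handler redirects execution
 * to the stub, whose "retl; a, b, %o0" leaves the number of bytes not yet
 * set in %o0 (e.g. EX(st %g3, [%o0], sub %o1, 0) reports the full
 * remaining length %o1).  STORE() instead branches to the shared fixup at
 * 30: below, first adjusting %o3 by "n - offset" so that %o3 still holds
 * the number of bytes left to clear in the 128-byte loop; 30: then adds
 * the tail (%o1 & 0x7f) and returns the total in %o0.
 */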

	.text
	.align 4

        .globl  __bzero_begin
__bzero_begin:

	.globl	__bzero
	.type	__bzero,#function
	.globl	memset
	EXPORT_SYMBOL(__bzero)
	EXPORT_SYMBOL(memset)
memset:
	mov	%o0, %g1
	mov	1, %g4
	and	%o1, 0xff, %g3
	sll	%g3, 8, %g2
	or	%g3, %g2, %g3
	sll	%g3, 16, %g2
	or	%g3, %g2, %g3
	b	1f
	 mov	%o2, %o1
3:
	cmp	%o2, 3
	be	2f
	 EX(stb	%g3, [%o0], sub %o1, 0)

	cmp	%o2, 2
	be	2f
	 EX(stb	%g3, [%o0 + 0x01], sub %o1, 1)

	EX(stb	%g3, [%o0 + 0x02], sub %o1, 2)
2:
	sub	%o2, 4, %o2
	add	%o1, %o2, %o1
	b	4f
	 sub	%o0, %o2, %o0

__bzero:
	clr	%g4
	mov	%g0, %g3
1:
	cmp	%o1, 7
	bleu	7f
	 andcc	%o0, 3, %o2

	bne	3b
4:
	 andcc	%o0, 4, %g0

	be	2f
	 mov	%g3, %g2

	EX(st	%g3, [%o0], sub %o1, 0)
	sub	%o1, 4, %o1
	add	%o0, 4, %o0
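
/* Main loop sketch: %o3 below is the length rounded down to a multiple of
 * 128, and each pass over 10: stores 128 bytes via two ZERO_BIG_BLOCK
 * expansions.  %o2 = len & 0x78 is then finished by a computed jump back
 * into the ZERO_LAST_BLOCKS run: each doubleword store there is a single
 * 4-byte instruction clearing 8 bytes, hence the "srl %o2, 1" used to form
 * the offset from label 13:.  The remaining word/halfword/byte, if any,
 * are handled after 13:.
 */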
2:
	andcc	%o1, 0xffffff80, %o3	! Now everything is 8 aligned and o1 is len to run
	be	9f
	 andcc	%o1, 0x78, %o2
10:
	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
	subcc	%o3, 128, %o3
	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
	bne	10b
	 add	%o0, 128, %o0

	orcc	%o2, %g0, %g0
9:
	be	13f
	 andcc	%o1, 7, %o1

	srl	%o2, 1, %o3
	set	13f, %o4
	sub	%o4, %o3, %o4
	jmp	%o4
	 add	%o0, %o2, %o0

	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
	be	8f
	 andcc	%o1, 4, %g0

	be	1f
	 andcc	%o1, 2, %g0

	EX(st	%g3, [%o0], and %o1, 7)
	add	%o0, 4, %o0
1:
	be	1f
	 andcc	%o1, 1, %g0

	EX(sth	%g3, [%o0], and %o1, 3)
	add	%o0, 2, %o0
1:
	bne,a	8f
	 EX(stb	%g3, [%o0], and %o1, 1)
8:
	b	0f
	 nop
7:
	be	13b
	 orcc	%o1, 0, %g0

	be	0f
8:
	 add	%o0, 1, %o0
	subcc	%o1, 1, %o1
	bne	8b
	 EX(stb	%g3, [%o0 - 1], add %o1, 1)
0:
	andcc	%g4, 1, %g0
	be	5f
	 nop
	retl
	 mov	%g1, %o0
5:
	retl
	 clr	%o0

	.section .fixup,#alloc,#execinstr
	.align	4
30:
	and	%o1, 0x7f, %o1
	retl
	 add	%o3, %o1, %o0

	.globl __bzero_end
__bzero_end: