Two kernel memset implementations, shown side by side in the original diff: the Hexagon version and an older MIPS version.

/* arch/hexagon/lib/memset.S */

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
 */


/* HEXAGON assembly optimized memset */
/* Replaces the standard library function memset */


	.macro HEXAGON_OPT_FUNC_BEGIN name
	.text
	.p2align 4
	.globl \name
	.type  \name, @function
\name:
	.endm

	.macro HEXAGON_OPT_FUNC_FINISH name
	.size  \name, . - \name
	.endm

/* FUNCTION: memset (v2 version) */
#if __HEXAGON_ARCH__ < 3
HEXAGON_OPT_FUNC_BEGIN memset
	{
		r6 = #8
		r7 = extractu(r0, #3 , #0)
		p0 = cmp.eq(r2, #0)
		p1 = cmp.gtu(r2, #7)
	}
	{
		r4 = vsplatb(r1)
		r8 = r0			/* leave r0 intact for return value */
		r9 = sub(r6, r7)	/* bytes until double alignment */
		if p0 jumpr r31		/* count == 0, so return */
	}
	{
		r3 = #0
		r7 = #0
		p0 = tstbit(r9, #0)
		if p1 jump 2f		/* skip byte loop */
	}

/* less than 8 bytes to set, so just set a byte at a time and return */

	loop0(1f, r2)			/* byte loop */
	.falign
1: /* byte loop */
	{
		memb(r8++#1) = r4
	}:endloop0
	jumpr r31
	.falign
2: /* skip byte loop */
	{
		r6 = #1
		p0 = tstbit(r9, #1)
		p1 = cmp.eq(r2, #1)
		if !p0 jump 3f		/* skip initial byte store */
	}
	{
		memb(r8++#1) = r4
		r3:2 = sub(r3:2, r7:6)
		if p1 jumpr r31
	}
	.falign
3: /* skip initial byte store */
	{
		r6 = #2
		p0 = tstbit(r9, #2)
		p1 = cmp.eq(r2, #2)
		if !p0 jump 4f		/* skip initial half store */
	}
	{
		memh(r8++#2) = r4
		r3:2 = sub(r3:2, r7:6)
		if p1 jumpr r31
	}
	.falign
4: /* skip initial half store */
	{
		r6 = #4
		p0 = cmp.gtu(r2, #7)
		p1 = cmp.eq(r2, #4)
		if !p0 jump 5f		/* skip initial word store */
	}
	{
		memw(r8++#4) = r4
		r3:2 = sub(r3:2, r7:6)
		p0 = cmp.gtu(r2, #11)
		if p1 jumpr r31
	}
	.falign
5: /* skip initial word store */
	{
		r10 = lsr(r2, #3)
		p1 = cmp.eq(r3, #1)
		if !p0 jump 7f		/* skip double loop */
	}
	{
		r5 = r4
		r6 = #8
		loop0(6f, r10)		/* double loop */
	}

/* set bytes a double word at a time */

	.falign
6: /* double loop */
	{
		memd(r8++#8) = r5:4
		r3:2 = sub(r3:2, r7:6)
		p1 = cmp.eq(r2, #8)
	}:endloop0
	.falign
7: /* skip double loop */
	{
		p0 = tstbit(r2, #2)
		if p1 jumpr r31
	}
	{
		r6 = #4
		p0 = tstbit(r2, #1)
		p1 = cmp.eq(r2, #4)
		if !p0 jump 8f		/* skip final word store */
	}
	{
		memw(r8++#4) = r4
		r3:2 = sub(r3:2, r7:6)
		if p1 jumpr r31
	}
	.falign
8: /* skip final word store */
	{
		p1 = cmp.eq(r2, #2)
		if !p0 jump 9f		/* skip final half store */
	}
	{
		memh(r8++#2) = r4
		if p1 jumpr r31
	}
	.falign
9: /* skip final half store */
	{
		memb(r8++#1) = r4
		jumpr r31
	}
HEXAGON_OPT_FUNC_FINISH memset
#endif
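The v2 code above (and the v3 version that follows) follows one plan: splat the fill byte across a 64-bit pattern (what vsplatb builds, held in the register pair r5:4 so a single memd store writes eight patterned bytes), peel off leading bytes until the destination is 8-byte aligned, fill with doubleword stores, and mop up the tail. A minimal C sketch of that flow; memset_sketch() is a hypothetical name, not anything from the kernel tree:

#include <stddef.h>
#include <stdint.h>

static void *memset_sketch(void *s, int c, size_t n)
{
	unsigned char *p = s;
	uint64_t pat = (unsigned char)c;

	pat |= pat << 8;		/* what vsplatb builds:           */
	pat |= pat << 16;		/* 0xcc -> 0xcccccccccccccccc     */
	pat |= pat << 32;

	if (n < 8) {			/* the byte loop at label 1 */
		while (n--)
			*p++ = (unsigned char)c;
		return s;
	}
	while ((uintptr_t)p & 7) {	/* "initial" stores: align to 8 */
		*p++ = (unsigned char)c;
		n--;
	}
	for (; n >= 8; n -= 8, p += 8)	/* the double loop: memd stores */
		*(uint64_t *)p = pat;	/* p is 8-byte aligned here */
	while (n--)			/* "final" stores: the tail */
		*p++ = (unsigned char)c;
	return s;
}

The assembly peels with at most one byte, one halfword, and one word store rather than a byte loop, but the effect is the same: at most seven bytes of scalar work before the wide stores start.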

/* FUNCTION: memset (v3 and higher version) */
#if __HEXAGON_ARCH__ >= 3
HEXAGON_OPT_FUNC_BEGIN memset
	{
		r7 = vsplatb(r1)
		r6 = r0
		if (r2==#0) jump:nt .L1
	}
	{
		r5:4 = combine(r7,r7)
		p0 = cmp.gtu(r2,#8)
		if (p0.new) jump:nt .L3
	}
	{
		r3 = r0
		loop0(.L47,r2)
	}
	.falign
.L47:
	{
		memb(r3++#1) = r1
	}:endloop0 /* start=.L47 */
	jumpr r31
.L3:
	{
		p0 = tstbit(r0,#0)
		if (!p0.new) jump:nt .L8
		p1 = cmp.eq(r2, #1)
	}
	{
		r6 = add(r0, #1)
		r2 = add(r2,#-1)
		memb(r0) = r1
		if (p1) jump .L1
	}
.L8:
	{
		p0 = tstbit(r6,#1)
		if (!p0.new) jump:nt .L10
	}
	{
		r2 = add(r2,#-2)
		memh(r6++#2) = r7
		p0 = cmp.eq(r2, #2)
		if (p0.new) jump:nt .L1
	}
.L10:
	{
		p0 = tstbit(r6,#2)
		if (!p0.new) jump:nt .L12
	}
	{
		r2 = add(r2,#-4)
		memw(r6++#4) = r7
		p0 = cmp.eq(r2, #4)
		if (p0.new) jump:nt .L1
	}
.L12:
	{
		p0 = cmp.gtu(r2,#127)
		if (!p0.new) jump:nt .L14
	}
	r3 = and(r6,#31)
	if (r3==#0) jump:nt .L17
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}
	r3 = and(r6,#31)
	if (r3==#0) jump:nt .L17
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}
	r3 = and(r6,#31)
	if (r3==#0) jump:nt .L17
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}
.L17:
	{
		r3 = lsr(r2,#5)
		if (r1!=#0) jump:nt .L18
	}
	{
		r8 = r3
		r3 = r6
		loop0(.L46,r3)
	}
	.falign
.L46:
	{
		dczeroa(r6)
		r6 = add(r6,#32)
		r2 = add(r2,#-32)
	}:endloop0 /* start=.L46 */
.L14:
	{
		p0 = cmp.gtu(r2,#7)
		if (!p0.new) jump:nt .L28
		r8 = lsr(r2,#3)
	}
	loop0(.L44,r8)
	.falign
.L44:
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}:endloop0 /* start=.L44 */
.L28:
	{
		p0 = tstbit(r2,#2)
		if (!p0.new) jump:nt .L33
	}
	{
		r2 = add(r2,#-4)
		memw(r6++#4) = r7
	}
.L33:
	{
		p0 = tstbit(r2,#1)
		if (!p0.new) jump:nt .L35
	}
	{
		r2 = add(r2,#-2)
		memh(r6++#2) = r7
	}
.L35:
	p0 = cmp.eq(r2,#1)
	if (p0) memb(r6) = r1
.L1:
	jumpr r31
.L18:
	loop0(.L45,r3)
	.falign
.L45:
	dczeroa(r6)
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-32)
	}
	memd(r6++#8) = r5:4
	memd(r6++#8) = r5:4
	{
		memd(r6++#8) = r5:4
	}:endloop0 /* start=.L45 */
	jump .L14
HEXAGON_OPT_FUNC_FINISH memset
#endif
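The distinctive part of the v3 version is the .L46/.L45 pair: for fills longer than 127 bytes it first aligns to a 32-byte boundary (the three conditional memd stores after .L12), then works a cache line at a time. When the fill byte is zero, loop .L46 just issues dczeroa per line; when it is non-zero, loop .L45 still issues dczeroa to allocate the line without fetching its old contents, then rewrites it with four memd stores. A rough C model of that path, where dczeroa_line() is a hypothetical stand-in for the dczeroa instruction:

#include <stddef.h>
#include <stdint.h>

#define LINE_SIZE 32	/* the cache-line size the assembly assumes */

/* Stand-in for dczeroa: zero-allocate one data-cache line without
 * reading its previous contents from memory. */
static inline void dczeroa_line(void *p)
{
	uint64_t *q = p;

	q[0] = q[1] = q[2] = q[3] = 0;
}

/* Model of loops .L46 (zero fill) and .L45 (non-zero fill); p must
 * already be 32-byte aligned, as arranged by the stores after .L12. */
static unsigned char *fill_cache_lines(unsigned char *p, uint64_t pat,
				       size_t *n)
{
	size_t lines = *n / LINE_SIZE;	/* r3 = lsr(r2,#5) */

	while (lines--) {
		dczeroa_line(p);	/* allocate the line, skip the fetch */
		if (pat) {		/* .L45 rewrites it with memd stores */
			uint64_t *q = (uint64_t *)p;

			q[0] = q[1] = q[2] = q[3] = pat;
		}
		p += LINE_SIZE;
		*n -= LINE_SIZE;
	}
	return p;	/* caller finishes the last < 32 bytes */
}

The older MIPS implementation below attacks the same problem with unrolled word stores and a computed jump instead of cache-line primitives.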
/* arch/mips/lib/memset.S (as found in older kernels) */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 by Ralf Baechle
 */
#include <asm/asm.h>
#include <asm/offset.h>
#include <asm/regdef.h>

#define EX(insn,reg,addr,handler)			\
9:	insn	reg, addr;				\
	.section __ex_table,"a";			\
	PTR	9b, handler;				\
	.previous

#define F_FILL64(dst, offset, val, fixup)		\
	EX(sw, val, (offset + 0x00)(dst), fixup);	\
	EX(sw, val, (offset + 0x04)(dst), fixup);	\
	EX(sw, val, (offset + 0x08)(dst), fixup);	\
	EX(sw, val, (offset + 0x0c)(dst), fixup);	\
	EX(sw, val, (offset + 0x10)(dst), fixup);	\
	EX(sw, val, (offset + 0x14)(dst), fixup);	\
	EX(sw, val, (offset + 0x18)(dst), fixup);	\
	EX(sw, val, (offset + 0x1c)(dst), fixup);	\
	EX(sw, val, (offset + 0x20)(dst), fixup);	\
	EX(sw, val, (offset + 0x24)(dst), fixup);	\
	EX(sw, val, (offset + 0x28)(dst), fixup);	\
	EX(sw, val, (offset + 0x2c)(dst), fixup);	\
	EX(sw, val, (offset + 0x30)(dst), fixup);	\
	EX(sw, val, (offset + 0x34)(dst), fixup);	\
	EX(sw, val, (offset + 0x38)(dst), fixup);	\
	EX(sw, val, (offset + 0x3c)(dst), fixup)

/*
 * memset(void *s, int c, size_t n)
 *
 * a0: start of area to clear
 * a1: char to fill with
 * a2: size of area to clear
 */
	.set	noreorder
	.align	5
LEAF(memset)
	beqz	a1, 1f
	move	v0, a0				/* result */

	andi	a1, 0xff			/* spread fillword */
	sll	t1, a1, 8
	or	a1, t1
	sll	t1, a1, 16
	or	a1, t1
1:

EXPORT(__bzero)
	sltiu	t0, a2, 4			/* very small region? */
	bnez	t0, small_memset
	andi	t0, a0, 3			/* aligned? */

	beqz	t0, 1f
	subu	t0, 4				/* alignment in bytes */

#ifdef __MIPSEB__
	EX(swl, a1, (a0), first_fixup)		/* make word aligned */
#endif
#ifdef __MIPSEL__
	EX(swr, a1, (a0), first_fixup)		/* make word aligned */
#endif
	subu	a0, t0				/* word align ptr */
	addu	a2, t0				/* correct size */

1:	ori	t1, a2, 0x3f			/* # of full blocks */
	xori	t1, 0x3f
	beqz	t1, memset_partial		/* no block to fill */
	andi	t0, a2, 0x3c

	addu	t1, a0				/* end address */
	.set	reorder
1:	addiu	a0, 64
	F_FILL64(a0, -64, a1, fwd_fixup)
	bne	t1, a0, 1b
	.set	noreorder

memset_partial:
	PTR_LA	t1, 2f				/* where to start */
	subu	t1, t0
	jr	t1
	addu	a0, t0				/* dest ptr */

	.set	push
	.set	noreorder
	.set	nomacro
	F_FILL64(a0, -64, a1, partial_fixup)	/* ... but first do wrds ... */
2:	.set	pop
	andi	a2, 3				/* 0 <= n <= 3 to go */

	beqz	a2, 1f
	addu	a0, a2				/* What's left */
#ifdef __MIPSEB__
	EX(swr, a1, -1(a0), last_fixup)
#endif
#ifdef __MIPSEL__
	EX(swl, a1, -1(a0), last_fixup)
#endif
1:	jr	ra
	move	a2, zero

small_memset:
	beqz	a2, 2f
	addu	t1, a0, a2

1:	addiu	a0, 1				/* fill bytewise */
	bne	t1, a0, 1b
	sb	a1, -1(a0)

2:	jr	ra				/* done */
	move	a2, zero
	END(memset)

first_fixup:
	jr	ra
	nop

fwd_fixup:
	lw	t0, THREAD_BUADDR($28)
	andi	a2, 0x3f
	addu	a2, t1
	jr	ra
	subu	a2, t0

partial_fixup:
	lw	t0, THREAD_BUADDR($28)
	andi	a2, 3
	addu	a2, t1
	jr	ra
	subu	a2, t0

last_fixup:
	jr	ra
	andi	v1, a2, 3
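memset_partial deserves a note: every sw emitted by F_FILL64 assembles to exactly four bytes of machine code, so with t0 = a2 & 0x3c bytes left, jumping to t1 = 2f - t0 lands t0/4 instructions before label 2 and executes exactly the word stores that are needed. The EX wrapper registers each store in __ex_table so that a faulting user-space store branches to a fixup handler, which computes into a2 how many bytes were still unset; that is what __bzero's callers need. The computed jump is the assembly form of a fall-through switch; a Duff's-device-style C sketch, with fill_partial() as a hypothetical name:

#include <stdint.h>

/* end points just past the partial region; words = (a2 & 0x3c) / 4 */
static void fill_partial(uint32_t *end, uint32_t pat, unsigned int words)
{
	switch (words) {	/* deliberate fall-through throughout */
	case 15: end[-15] = pat;	/* fall through */
	case 14: end[-14] = pat;	/* fall through */
	case 13: end[-13] = pat;	/* fall through */
	case 12: end[-12] = pat;	/* fall through */
	case 11: end[-11] = pat;	/* fall through */
	case 10: end[-10] = pat;	/* fall through */
	case 9:  end[-9]  = pat;	/* fall through */
	case 8:  end[-8]  = pat;	/* fall through */
	case 7:  end[-7]  = pat;	/* fall through */
	case 6:  end[-6]  = pat;	/* fall through */
	case 5:  end[-5]  = pat;	/* fall through */
	case 4:  end[-4]  = pat;	/* fall through */
	case 3:  end[-3]  = pat;	/* fall through */
	case 2:  end[-2]  = pat;	/* fall through */
	case 1:  end[-1]  = pat;	/* fall through */
	case 0:  break;
	}
}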