Two architecture-specific kernel memset implementations are compared below; each side of the diff is reproduced in full. First, the Hexagon assembly-optimized memset:

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
 */


/* HEXAGON assembly optimized memset */
/* Replaces the standard library function memset */


	.macro HEXAGON_OPT_FUNC_BEGIN name
	.text
	.p2align 4
	.globl \name
	.type  \name, @function
\name:
	.endm

	.macro HEXAGON_OPT_FUNC_FINISH name
	.size  \name, . - \name
	.endm

/* FUNCTION: memset (v2 version) */
#if __HEXAGON_ARCH__ < 3
HEXAGON_OPT_FUNC_BEGIN memset
	{
		r6 = #8
		r7 = extractu(r0, #3 , #0)
		p0 = cmp.eq(r2, #0)
		p1 = cmp.gtu(r2, #7)
	}
	{
		r4 = vsplatb(r1)
		r8 = r0			/* leave r0 intact for return value */
		r9 = sub(r6, r7)	/* bytes until double alignment */
		if p0 jumpr r31		/* count == 0, so return */
	}
	{
		r3 = #0
		r7 = #0
		p0 = tstbit(r9, #0)
		if p1 jump 2f		/* skip byte loop */
	}

/* less than 8 bytes to set, so just set a byte at a time and return */

		loop0(1f, r2)		/* byte loop */
	.falign
1: /* byte loop */
	{
		memb(r8++#1) = r4
	}:endloop0
		jumpr r31
	.falign
2: /* skip byte loop */
	{
		r6 = #1
		p0 = tstbit(r9, #1)
		p1 = cmp.eq(r2, #1)
		if !p0 jump 3f		/* skip initial byte store */
	}
	{
		memb(r8++#1) = r4
		r3:2 = sub(r3:2, r7:6)
		if p1 jumpr r31
	}
	.falign
3: /* skip initial byte store */
	{
		r6 = #2
		p0 = tstbit(r9, #2)
		p1 = cmp.eq(r2, #2)
		if !p0 jump 4f		/* skip initial half store */
	}
	{
		memh(r8++#2) = r4
		r3:2 = sub(r3:2, r7:6)
		if p1 jumpr r31
	}
	.falign
4: /* skip initial half store */
	{
		r6 = #4
		p0 = cmp.gtu(r2, #7)
		p1 = cmp.eq(r2, #4)
		if !p0 jump 5f		/* skip initial word store */
	}
	{
		memw(r8++#4) = r4
		r3:2 = sub(r3:2, r7:6)
		p0 = cmp.gtu(r2, #11)
		if p1 jumpr r31
	}
	.falign
5: /* skip initial word store */
	{
		r10 = lsr(r2, #3)
		p1 = cmp.eq(r3, #1)
		if !p0 jump 7f		/* skip double loop */
	}
	{
		r5 = r4
		r6 = #8
		loop0(6f, r10)		/* double loop */
	}

/* set bytes a double word at a time */

	.falign
6: /* double loop */
	{
		memd(r8++#8) = r5:4
		r3:2 = sub(r3:2, r7:6)
		p1 = cmp.eq(r2, #8)
	}:endloop0
	.falign
7: /* skip double loop */
	{
		p0 = tstbit(r2, #2)
		if p1 jumpr r31
	}
	{
		r6 = #4
		p0 = tstbit(r2, #1)
		p1 = cmp.eq(r2, #4)
		if !p0 jump 8f		/* skip final word store */
	}
	{
		memw(r8++#4) = r4
		r3:2 = sub(r3:2, r7:6)
		if p1 jumpr r31
	}
	.falign
8: /* skip final word store */
	{
		p1 = cmp.eq(r2, #2)
		if !p0 jump 9f		/* skip final half store */
	}
	{
		memh(r8++#2) = r4
		if p1 jumpr r31
	}
	.falign
9: /* skip final half store */
	{
		memb(r8++#1) = r4
		jumpr r31
	}
HEXAGON_OPT_FUNC_FINISH memset
#endif


/* FUNCTION: memset (v3 and higher version) */
#if __HEXAGON_ARCH__ >= 3
HEXAGON_OPT_FUNC_BEGIN memset
	{
		r7=vsplatb(r1)
		r6 = r0
		if (r2==#0) jump:nt .L1
	}
	{
		r5:4=combine(r7,r7)
		p0 = cmp.gtu(r2,#8)
		if (p0.new) jump:nt .L3
	}
	{
		r3 = r0
		loop0(.L47,r2)
	}
	.falign
.L47:
	{
		memb(r3++#1) = r1
	}:endloop0 /* start=.L47 */
	jumpr r31
.L3:
	{
		p0 = tstbit(r0,#0)
		if (!p0.new) jump:nt .L8
		p1 = cmp.eq(r2, #1)
	}
	{
		r6 = add(r0, #1)
		r2 = add(r2,#-1)
		memb(r0) = r1
		if (p1) jump .L1
	}
.L8:
	{
		p0 = tstbit(r6,#1)
		if (!p0.new) jump:nt .L10
	}
	{
		r2 = add(r2,#-2)
		memh(r6++#2) = r7
		p0 = cmp.eq(r2, #2)
		if (p0.new) jump:nt .L1
	}
.L10:
	{
		p0 = tstbit(r6,#2)
		if (!p0.new) jump:nt .L12
	}
	{
		r2 = add(r2,#-4)
		memw(r6++#4) = r7
		p0 = cmp.eq(r2, #4)
		if (p0.new) jump:nt .L1
	}
.L12:
	{
		p0 = cmp.gtu(r2,#127)
		if (!p0.new) jump:nt .L14
	}
	r3 = and(r6,#31)
	if (r3==#0) jump:nt .L17
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}
	r3 = and(r6,#31)
	if (r3==#0) jump:nt .L17
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}
	r3 = and(r6,#31)
	if (r3==#0) jump:nt .L17
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}
.L17:
	{
		r3 = lsr(r2,#5)
		if (r1!=#0) jump:nt .L18
	}
	{
		r8 = r3
		r3 = r6
		loop0(.L46,r3)
	}
	.falign
.L46:
	{
		dczeroa(r6)
		r6 = add(r6,#32)
		r2 = add(r2,#-32)
	}:endloop0 /* start=.L46 */
.L14:
	{
		p0 = cmp.gtu(r2,#7)
		if (!p0.new) jump:nt .L28
		r8 = lsr(r2,#3)
	}
	loop0(.L44,r8)
	.falign
.L44:
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-8)
	}:endloop0 /* start=.L44 */
.L28:
	{
		p0 = tstbit(r2,#2)
		if (!p0.new) jump:nt .L33
	}
	{
		r2 = add(r2,#-4)
		memw(r6++#4) = r7
	}
.L33:
	{
		p0 = tstbit(r2,#1)
		if (!p0.new) jump:nt .L35
	}
	{
		r2 = add(r2,#-2)
		memh(r6++#2) = r7
	}
.L35:
	p0 = cmp.eq(r2,#1)
	if (p0) memb(r6) = r1
.L1:
	jumpr r31
.L18:
	loop0(.L45,r3)
	.falign
.L45:
	dczeroa(r6)
	{
		memd(r6++#8) = r5:4
		r2 = add(r2,#-32)
	}
	memd(r6++#8) = r5:4
	memd(r6++#8) = r5:4
	{
		memd(r6++#8) = r5:4
	}:endloop0 /* start=.L45 */
	jump .L14
HEXAGON_OPT_FUNC_FINISH memset
#endif
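Both implementations follow the same broad plan: replicate the fill byte across a wide register, store bytes/halfwords/words until the destination is aligned, fill the bulk with wide stores, then finish the remaining tail. The C sketch below illustrates only that plan; it assumes a 64-bit store width, the helper name memset_sketch is hypothetical, and it is not the code that either assembly file implements.

	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative sketch: align the head, fill eight bytes at a time,
	 * then finish the tail byte by byte. Not the kernel's memset. */
	static void *memset_sketch(void *dst, int c, size_t n)
	{
		unsigned char *p = dst;
		uint64_t pat = (uint8_t)c;

		pat |= pat << 8;	/* splat the byte across the register,  */
		pat |= pat << 16;	/* like vsplatb()/combine() on Hexagon  */
		pat |= pat << 32;	/* or the MIPS "spread fillword" step   */

		while (n && ((uintptr_t)p & 7)) {	/* head: reach 8-byte alignment */
			*p++ = (unsigned char)c;
			n--;
		}
		while (n >= 8) {			/* body: doubleword stores */
			*(uint64_t *)p = pat;
			p += 8;
			n -= 8;
		}
		while (n--)				/* tail: leftover bytes */
			*p++ = (unsigned char)c;

		return dst;
	}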
The other side of the diff: the MIPS memset and __bzero implementation.

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 by Maciej W. Rozycki
 * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#if LONGSIZE == 4
#define LONG_S_L swl
#define LONG_S_R swr
#else
#define LONG_S_L sdl
#define LONG_S_R sdr
#endif

#ifdef CONFIG_CPU_MICROMIPS
#define STORSIZE (LONGSIZE * 2)
#define STORMASK (STORSIZE - 1)
#define FILL64RG t8
#define FILLPTRG t7
#undef  LONG_S
#define LONG_S LONG_SP
#else
#define STORSIZE LONGSIZE
#define STORMASK LONGMASK
#define FILL64RG a1
#define FILLPTRG t0
#endif

#define LEGACY_MODE 1
#define EVA_MODE    2

/*
 * No need to protect it with EVA #ifdefery. The generated block of code
 * will never be assembled if EVA is not enabled.
 */
#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr)
#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr)

#define EX(insn,reg,addr,handler)			\
	.if \mode == LEGACY_MODE;			\
9:		insn	reg, addr;			\
	.else;						\
9:		___BUILD_EVA_INSN(insn, reg, addr);	\
	.endif;						\
	.section __ex_table,"a";			\
	PTR	9b, handler;				\
	.previous

	.macro	f_fill64 dst, offset, val, fixup, mode
	EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  3 * STORSIZE)(\dst), \fixup)
#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
	EX(LONG_S, \val, (\offset +  4 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  5 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  6 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  7 * STORSIZE)(\dst), \fixup)
#endif
#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
	EX(LONG_S, \val, (\offset +  8 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  9 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
#endif
	.endm

	.set	noreorder
	.align	5

/*
 * Macro to generate the __bzero{,_user} symbol
 * Arguments:
 * mode: LEGACY_MODE or EVA_MODE
 */
	.macro	__BUILD_BZERO mode
	/* Initialize __memset if this is the first time we call this macro */
	.ifnotdef __memset
	.set	__memset, 1
	.hidden	__memset		/* Make sure it does not leak */
	.endif

	sltiu		t0, a2, STORSIZE	/* very small region? */
	bnez		t0, .Lsmall_memset\@
	 andi		t0, a0, STORMASK	/* aligned? */

#ifdef CONFIG_CPU_MICROMIPS
	move		t8, a1			/* used by 'swp' instruction */
	move		t9, a1
#endif
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	beqz		t0, 1f
	 PTR_SUBU	t0, STORSIZE		/* alignment in bytes */
#else
	.set		noat
	li		AT, STORSIZE
	beqz		t0, 1f
	 PTR_SUBU	t0, AT			/* alignment in bytes */
	.set		at
#endif

#ifndef CONFIG_CPU_MIPSR6
	R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
	EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
#else
	EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
#endif
	PTR_SUBU	a0, t0			/* long align ptr */
	PTR_ADDU	a2, t0			/* correct size */

#else /* CONFIG_CPU_MIPSR6 */
#define STORE_BYTE(N)				\
	EX(sb, a1, N(a0), .Lbyte_fixup\@);	\
	beqz	t0, 0f;				\
	PTR_ADDU t0, 1;

	PTR_ADDU	a2, t0			/* correct size */
	PTR_ADDU	t0, 1
	STORE_BYTE(0)
	STORE_BYTE(1)
#if LONGSIZE == 4
	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
#else
	STORE_BYTE(2)
	STORE_BYTE(3)
	STORE_BYTE(4)
	STORE_BYTE(5)
	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
#endif
0:
	ori		a0, STORMASK
	xori		a0, STORMASK
	PTR_ADDIU	a0, STORSIZE
#endif /* CONFIG_CPU_MIPSR6 */
1:	ori		t1, a2, 0x3f		/* # of full blocks */
	xori		t1, 0x3f
	beqz		t1, .Lmemset_partial\@	/* no block to fill */
	 andi		t0, a2, 0x40-STORSIZE

	PTR_ADDU	t1, a0			/* end address */
	.set		reorder
1:	PTR_ADDIU	a0, 64
	R10KCBARRIER(0(ra))
	f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
	bne		t1, a0, 1b
	.set		noreorder

.Lmemset_partial\@:
	R10KCBARRIER(0(ra))
	PTR_LA		t1, 2f			/* where to start */
#ifdef CONFIG_CPU_MICROMIPS
	LONG_SRL	t7, t0, 1
#endif
#if LONGSIZE == 4
	PTR_SUBU	t1, FILLPTRG
#else
	.set		noat
	LONG_SRL	AT, FILLPTRG, 1
	PTR_SUBU	t1, AT
	.set		at
#endif
	jr		t1
	 PTR_ADDU	a0, t0			/* dest ptr */

	.set		push
	.set		noreorder
	.set		nomacro
	/* ... but first do longs ... */
	f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
2:	.set		pop
	andi		a2, STORMASK		/* At most one long to go */

	beqz		a2, 1f
#ifndef CONFIG_CPU_MIPSR6
	 PTR_ADDU	a0, a2			/* What's left */
	R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
	EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
#else
	EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
#endif
#else
	 PTR_SUBU	t0, $0, a2
	PTR_ADDIU	t0, 1
	STORE_BYTE(0)
	STORE_BYTE(1)
#if LONGSIZE == 4
	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
#else
	STORE_BYTE(2)
	STORE_BYTE(3)
	STORE_BYTE(4)
	STORE_BYTE(5)
	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
#endif
0:
#endif
1:	jr		ra
	 move		a2, zero

.Lsmall_memset\@:
	beqz		a2, 2f
	 PTR_ADDU	t1, a0, a2

1:	PTR_ADDIU	a0, 1			/* fill bytewise */
	R10KCBARRIER(0(ra))
	bne		t1, a0, 1b
	 sb		a1, -1(a0)

2:	jr		ra			/* done */
	 move		a2, zero
	.if __memset == 1
	END(memset)
	.set	__memset, 0
	.hidden	__memset
	.endif

#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
	PTR_SUBU	a2, $0, t0
	jr		ra
	 PTR_ADDIU	a2, 1
#endif /* CONFIG_CPU_MIPSR6 */

.Lfirst_fixup\@:
	jr		ra
	 nop

.Lfwd_fixup\@:
	PTR_L		t0, TI_TASK($28)
	andi		a2, 0x3f
	LONG_L		t0, THREAD_BUADDR(t0)
	LONG_ADDU	a2, t1
	jr		ra
	 LONG_SUBU	a2, t0

.Lpartial_fixup\@:
	PTR_L		t0, TI_TASK($28)
	andi		a2, STORMASK
	LONG_L		t0, THREAD_BUADDR(t0)
	LONG_ADDU	a2, t1
	jr		ra
	 LONG_SUBU	a2, t0

.Llast_fixup\@:
	jr		ra
	 andi		v1, a2, STORMASK

	.endm

/*
 * memset(void *s, int c, size_t n)
 *
 * a0: start of area to clear
 * a1: char to fill with
 * a2: size of area to clear
 */

LEAF(memset)
	beqz		a1, 1f
	 move		v0, a0			/* result */

	andi		a1, 0xff		/* spread fillword */
	LONG_SLL	t1, a1, 8
	or		a1, t1
	LONG_SLL	t1, a1, 16
#if LONGSIZE == 8
	or		a1, t1
	LONG_SLL	t1, a1, 32
#endif
	or		a1, t1
1:
#ifndef CONFIG_EVA
FEXPORT(__bzero)
#else
FEXPORT(__bzero_kernel)
#endif
	__BUILD_BZERO LEGACY_MODE

#ifdef CONFIG_EVA
LEAF(__bzero)
	__BUILD_BZERO EVA_MODE
	END(__bzero)
#endif
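For the partial block at the end of a fill, the MIPS code computes an entry point inside the unrolled store run generated by f_fill64 (PTR_LA t1, 2f; PTR_SUBU t1, ...; jr t1), so that only the stores still needed are executed. The C analogue of that trick is a Duff's-device-style jump into an unrolled loop. A minimal sketch, assuming 8-byte longs and fewer than eight doublewords left in the block; fill_partial_block is a hypothetical helper, not a routine from the file above.

	#include <stdint.h>

	/* Illustrative sketch: store the last 'dwords' doublewords before the
	 * 64-byte block boundary 'end' by entering an unrolled sequence at the
	 * right place, the C analogue of 'jr t1' into f_fill64. */
	static void fill_partial_block(uint64_t *end, unsigned int dwords, uint64_t pat)
	{
		uint64_t *p = end - dwords;	/* dwords is assumed to be < 8 */

		switch (dwords) {
		case 7: *p++ = pat;	/* fall through */
		case 6: *p++ = pat;	/* fall through */
		case 5: *p++ = pat;	/* fall through */
		case 4: *p++ = pat;	/* fall through */
		case 3: *p++ = pat;	/* fall through */
		case 2: *p++ = pat;	/* fall through */
		case 1: *p++ = pat;	/* fall through */
		case 0: break;
		}
	}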