/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Regents of the University of California
 */


#include <linux/linkage.h>
#include <asm/asm.h>

/* void *memset(void *, int, size_t) */
SYM_FUNC_START(__memset)
	move t0, a0  /* Preserve return value */

	/* Defer to byte-oriented fill for small sizes */
	sltiu a3, a2, 16
	bnez a3, 4f

	/*
	 * Round to nearest XLEN-aligned address
	 * greater than or equal to start address
	 */
	addi a3, t0, SZREG-1
	andi a3, a3, ~(SZREG-1)
	beq a3, t0, 2f  /* Skip if already aligned */
	/* Handle initial misalignment */
	sub a4, a3, t0
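
	/*
	 * Worked example of the rounding above (an illustration,
	 * assuming RV64 where SZREG == 8): for t0 = 0x1003,
	 * a3 = (0x1003 + 7) & ~7 = 0x1008, so a4 = 5 and the byte
	 * loop below fills the five unaligned head bytes before
	 * the XLEN-wide stores take over.
	 */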
1:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 1b
	sub a2, a2, a4  /* Update count */

2: /* Duff's device with 32 XLEN stores per iteration */
	/* Broadcast value into all bytes */
	andi a1, a1, 0xff
	slli a3, a1, 8
	or a1, a3, a1
	slli a3, a1, 16
	or a1, a3, a1
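	/*
	 * Illustration of the splat, assuming a1 = 0x5a on entry:
	 * 0x0000005a -> 0x00005a5a -> 0x5a5a5a5a after the two
	 * shift/or pairs above; on RV64 the pair below doubles it
	 * once more so all eight bytes of the fill word match.
	 */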
#ifdef CONFIG_64BIT
	slli a3, a1, 32
	or a1, a3, a1
#endif

	/* Calculate end address */
	andi a4, a2, ~(SZREG-1)
	add a3, t0, a4

	andi a4, a4, 31*SZREG  /* Calculate remainder */
	beqz a4, 3f            /* Shortcut if no remainder */
	neg a4, a4
	addi a4, a4, 32*SZREG  /* Calculate initial offset */

	/* Adjust start address with offset */
	sub t0, t0, a4

	/* Jump into loop body */
	/* Assumes 32-bit instruction lengths */
	la a5, 3f
#ifdef CONFIG_64BIT
	srli a4, a4, 1
#endif
	add a5, a5, a4
	jr a5
3:
	REG_S a1, 0(t0)
	REG_S a1, SZREG(t0)
	REG_S a1, 2*SZREG(t0)
	REG_S a1, 3*SZREG(t0)
	REG_S a1, 4*SZREG(t0)
	REG_S a1, 5*SZREG(t0)
	REG_S a1, 6*SZREG(t0)
	REG_S a1, 7*SZREG(t0)
	REG_S a1, 8*SZREG(t0)
	REG_S a1, 9*SZREG(t0)
	REG_S a1, 10*SZREG(t0)
	REG_S a1, 11*SZREG(t0)
	REG_S a1, 12*SZREG(t0)
	REG_S a1, 13*SZREG(t0)
	REG_S a1, 14*SZREG(t0)
	REG_S a1, 15*SZREG(t0)
	REG_S a1, 16*SZREG(t0)
	REG_S a1, 17*SZREG(t0)
	REG_S a1, 18*SZREG(t0)
	REG_S a1, 19*SZREG(t0)
	REG_S a1, 20*SZREG(t0)
	REG_S a1, 21*SZREG(t0)
	REG_S a1, 22*SZREG(t0)
	REG_S a1, 23*SZREG(t0)
	REG_S a1, 24*SZREG(t0)
	REG_S a1, 25*SZREG(t0)
	REG_S a1, 26*SZREG(t0)
	REG_S a1, 27*SZREG(t0)
	REG_S a1, 28*SZREG(t0)
	REG_S a1, 29*SZREG(t0)
	REG_S a1, 30*SZREG(t0)
	REG_S a1, 31*SZREG(t0)
	addi t0, t0, 32*SZREG
	bltu t0, a3, 3b
	andi a2, a2, SZREG-1  /* Update count */

4:
	/* Handle trailing misalignment */
	beqz a2, 6f
	add a3, t0, a2
5:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 5b
6:
	ret
SYM_FUNC_END(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset)
SYM_FUNC_ALIAS(__pi_memset, __memset)
SYM_FUNC_ALIAS(__pi___memset, __memset)
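
/*
 * Note on the computed jump into the Duff's device body, as a worked
 * example (assuming RV64, SZREG == 8, and that each REG_S assembles
 * to one 4-byte instruction): for 49 remaining words the remainder is
 * 49 mod 32 = 17 words, so a4 = (32 - 17) * 8 = 120 data bytes.  Each
 * 4-byte REG_S covers 8 data bytes, hence the srli halves a4 to 60
 * instruction bytes: the jump skips the first 15 stores and the first
 * pass through 3: writes exactly 17 words.  On RV32 one instruction
 * covers one word, so no shift is needed.
 */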