/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/linkage.h>

#ifdef __LITTLE_ENDIAN__
#define WORD2	r2
#define SHIFT	r3
#else /* BIG ENDIAN */
#define WORD2	r3
#define SHIFT	r2
#endif

ENTRY_CFI(memcmp)
	; r0 = buffer 1, r1 = buffer 2, r2 = byte count; result returned in r0
	or	r12,r0,r1
	asl_s	r12,r12,30
	sub	r3,r2,1
	brls	r2,r12,.Lbytewise
	ld	r4,[r0,0]
	ld	r5,[r1,0]
	lsr.f	lp_count,r3,3
#ifdef CONFIG_ISA_ARCV2
	/* In ARCv2 a branch can't be the last instruction in a hardware
	 * loop.
	 * So we move the branch to the start of the loop, duplicate it
	 * after the end, and set up r12 so that the branch isn't taken
	 * initially.
	 */
	mov_s	r12,WORD2
	lpne	.Loop_end
	brne	WORD2,r12,.Lodd
	ld	WORD2,[r0,4]
#else
	lpne	.Loop_end
	ld_s	WORD2,[r0,4]
#endif
	ld_s	r12,[r1,4]
	brne	r4,r5,.Leven
	ld.a	r4,[r0,8]
	ld.a	r5,[r1,8]
#ifdef CONFIG_ISA_ARCV2
.Loop_end:
	brne	WORD2,r12,.Lodd
#else
	brne	WORD2,r12,.Lodd
.Loop_end:
#endif
	asl_s	SHIFT,SHIFT,3
	bhs_s	.Last_cmp
	brne	r4,r5,.Leven
	ld	r4,[r0,4]
	ld	r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
	nop_s
	; one more load latency cycle
.Last_cmp:
	xor	r0,r4,r5
	bset	r0,r0,SHIFT
	sub_s	r1,r0,1
	bic_s	r1,r1,r0
	norm	r1,r1
	b.d	.Leven_cmp
	and	r1,r1,24
.Leven:
	xor	r0,r4,r5
	sub_s	r1,r0,1
	bic_s	r1,r1,r0
	norm	r1,r1
	; slow track insn
	and	r1,r1,24
.Leven_cmp:
	asl	r2,r4,r1
	asl	r12,r5,r1
	lsr_s	r2,r2,1
	lsr_s	r12,r12,1
	j_s.d	[blink]
	sub	r0,r2,r12
	.balign	4
.Lodd:
	xor	r0,WORD2,r12
	sub_s	r1,r0,1
	bic_s	r1,r1,r0
	norm	r1,r1
	; slow track insn
	and	r1,r1,24
	asl_s	r2,r2,r1
	asl_s	r12,r12,r1
	lsr_s	r2,r2,1
	lsr_s	r12,r12,1
	j_s.d	[blink]
	sub	r0,r2,r12
#else /* BIG ENDIAN */
.Last_cmp:
	neg_s	SHIFT,SHIFT
	lsr	r4,r4,SHIFT
	lsr	r5,r5,SHIFT
	; slow track insn
.Leven:
	sub.f	r0,r4,r5
	mov.ne	r0,1
	j_s.d	[blink]
	bset.cs	r0,r0,31
.Lodd:
	cmp_s	WORD2,r12
	mov_s	r0,1
	j_s.d	[blink]
	bset.cs	r0,r0,31
#endif /* ENDIAN */
	.balign	4
.Lbytewise:
	; byte-at-a-time path (unaligned buffers or zero length)
	breq	r2,0,.Lnil
	ldb	r4,[r0,0]
	ldb	r5,[r1,0]
	lsr.f	lp_count,r3
#ifdef CONFIG_ISA_ARCV2
	mov	r12,r3
	lpne	.Lbyte_end
	brne	r3,r12,.Lbyte_odd
#else
	lpne	.Lbyte_end
#endif
	ldb_s	r3,[r0,1]
	ldb	r12,[r1,1]
	brne	r4,r5,.Lbyte_even
	ldb.a	r4,[r0,2]
	ldb.a	r5,[r1,2]
#ifdef CONFIG_ISA_ARCV2
.Lbyte_end:
	brne	r3,r12,.Lbyte_odd
#else
	brne	r3,r12,.Lbyte_odd
.Lbyte_end:
#endif
	bcc	.Lbyte_even
	brne	r4,r5,.Lbyte_even
	ldb_s	r3,[r0,1]
	ldb_s	r12,[r1,1]
.Lbyte_odd:
	j_s.d	[blink]
	sub	r0,r3,r12
.Lbyte_even:
	j_s.d	[blink]
	sub	r0,r4,r5
.Lnil:
	j_s.d	[blink]
	mov	r0,0
END_CFI(memcmp)

/* SPARC memcmp implementation, shown alongside the ARC version above: */
#include <asm/cprefix.h>

	.text
	.align	4
	.global	C_LABEL(__memcmp), C_LABEL(memcmp)
C_LABEL(__memcmp):
C_LABEL(memcmp):
#if 1
	cmp	%o2, 0
	ble	L3
	mov	0, %g3
L5:
	ldub	[%o0], %g2
	ldub	[%o1], %g3
	sub	%g2, %g3, %g2
	mov	%g2, %g3
	sll	%g2, 24, %g2

	cmp	%g2, 0
	bne	L3
	add	%o0, 1, %o0

	add	%o2, -1, %o2

	cmp	%o2, 0
	bg	L5
	add	%o1, 1, %o1
L3:
	sll	%g3, 24, %o0
	sra	%o0, 24, %o0

	retl
	nop
#else
	! unused word-at-a-time version (compiled out by the "#if 1" above)
	save	%sp, -104, %sp
	mov	%i2, %o4
	mov	%i0, %o0

	cmp	%o4, 15
	ble	L72
	mov	%i1, %i2

	andcc	%i2, 3, %g0
	be	L161
	andcc	%o0, 3, %g2
L75:
	ldub	[%o0], %g3
	ldub	[%i2], %g2
	add	%o0, 1, %o0

	subcc	%g3, %g2, %i0
	bne	L156
	add	%i2, 1, %i2

	andcc	%i2, 3, %g0
	bne	L75
	add	%o4, -1, %o4

	andcc	%o0, 3, %g2
L161:
	bne,a	L78
	mov	%i2, %i1

	mov	%o0, %i5
	mov	%i2, %i3
	srl	%o4, 2, %i4

	cmp	%i4, 0
	bge	L93
	mov	%i4, %g2

	add	%i4, 3, %g2
L93:
	sra	%g2, 2, %g2
	sll	%g2, 2, %g2
	sub	%i4, %g2, %g2

	cmp	%g2, 1
	be,a	L88
	add	%o0, 4, %i5

	bg	L94
	cmp	%g2, 2

	cmp	%g2, 0
	be,a	L86
	ld	[%o0], %g3

	b	L162
	ld	[%i5], %g3
L94:
	be	L81
	cmp	%g2, 3

	be,a	L83
	add	%o0, -4, %i5

	b	L162
	ld	[%i5], %g3
L81:
	add	%o0, -8, %i5
	ld	[%o0], %g3
	add	%i2, -8, %i3
	ld	[%i2], %g2

	b	L82
	add	%i4, 2, %i4
L83:
	ld	[%o0], %g4
	add	%i2, -4, %i3
	ld	[%i2], %g1

	b	L84
	add	%i4, 1, %i4
L86:
	b	L87
	ld	[%i2], %g2
L88:
	add	%i2, 4, %i3
	ld	[%o0], %g4
	add	%i4, -1, %i4
	ld	[%i2], %g1
L95:
	ld	[%i5], %g3
L162:
	cmp	%g4, %g1
	be	L87
	ld	[%i3], %g2

	cmp	%g4, %g1
L163:
	bleu	L114
	mov	-1, %i0

	b	L114
	mov	1, %i0
L87:
	ld	[%i5 + 4], %g4
	cmp	%g3, %g2
	bne	L163
	ld	[%i3 + 4], %g1
L84:
	ld	[%i5 + 8], %g3

	cmp	%g4, %g1
	bne	L163
	ld	[%i3 + 8], %g2
L82:
	ld	[%i5 + 12], %g4
	cmp	%g3, %g2
	bne	L163
	ld	[%i3 + 12], %g1

	add	%i5, 16, %i5

	addcc	%i4, -4, %i4
	bne	L95
	add	%i3, 16, %i3

	cmp	%g4, %g1
	bne	L163
	nop

	b	L114
	mov	0, %i0
L78:
	srl	%o4, 2, %i0
	and	%o0, -4, %i3
	orcc	%i0, %g0, %g3
	sll	%g2, 3, %o7
	mov	32, %g2

	bge	L129
	sub	%g2, %o7, %o1

	add	%i0, 3, %g3
L129:
	sra	%g3, 2, %g2
	sll	%g2, 2, %g2
	sub	%i0, %g2, %g2

	cmp	%g2, 1
	be,a	L124
	ld	[%i3], %o3

	bg	L130
	cmp	%g2, 2

	cmp	%g2, 0
	be,a	L122
	ld	[%i3], %o2

	b	L164
	sll	%o3, %o7, %g3
L130:
	be	L117
	cmp	%g2, 3

	be,a	L119
	ld	[%i3], %g1

	b	L164
	sll	%o3, %o7, %g3
L117:
	ld	[%i3], %g4
	add	%i2, -8, %i1
	ld	[%i3 + 4], %o3
	add	%i0, 2, %i0
	ld	[%i2], %i4

	b	L118
	add	%i3, -4, %i3
L119:
	ld	[%i3 + 4], %g4
	add	%i2, -4, %i1
	ld	[%i2], %i5

	b	L120
	add	%i0, 1, %i0
L122:
	ld	[%i3 + 4], %g1
	ld	[%i2], %i4

	b	L123
	add	%i3, 4, %i3
L124:
	add	%i2, 4, %i1
	ld	[%i3 + 4], %o2
	add	%i0, -1, %i0
	ld	[%i2], %i5
	add	%i3, 8, %i3
L131:
	sll	%o3, %o7, %g3
L164:
	srl	%o2, %o1, %g2
	ld	[%i3], %g1
	or	%g3, %g2, %g3

	cmp	%g3, %i5
	bne	L163
	ld	[%i1], %i4
L123:
	sll	%o2, %o7, %g3
	srl	%g1, %o1, %g2
	ld	[%i3 + 4], %g4
	or	%g3, %g2, %g3

	cmp	%g3, %i4
	bne	L163
	ld	[%i1 + 4], %i5
L120:
	sll	%g1, %o7, %g3
	srl	%g4, %o1, %g2
	ld	[%i3 + 8], %o3
	or	%g3, %g2, %g3

	cmp	%g3, %i5
	bne	L163
	ld	[%i1 + 8], %i4
L118:
	sll	%g4, %o7, %g3
	srl	%o3, %o1, %g2
	ld	[%i3 + 12], %o2
	or	%g3, %g2, %g3

	cmp	%g3, %i4
	bne	L163
	ld	[%i1 + 12], %i5

	add	%i3, 16, %i3
	addcc	%i0, -4, %i0
	bne	L131
	add	%i1, 16, %i1

	sll	%o3, %o7, %g3
	srl	%o2, %o1, %g2
	or	%g3, %g2, %g3

	cmp	%g3, %i5
	be,a	L114
	mov	0, %i0

	b,a	L163
L114:
	cmp	%i0, 0
	bne	L156
	and	%o4, -4, %g2

	add	%o0, %g2, %o0
	add	%i2, %g2, %i2
	and	%o4, 3, %o4
L72:
	cmp	%o4, 0
	be	L156
	mov	0, %i0

	ldub	[%o0], %g3
L165:
	ldub	[%i2], %g2
	add	%o0, 1, %o0

	subcc	%g3, %g2, %i0
	bne	L156
	add	%i2, 1, %i2

	addcc	%o4, -1, %o4
	bne,a	L165
	ldub	[%o0], %g3

	mov	0, %i0
L156:
	ret
	restore
#endif
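For reference, both routines implement the standard memcmp contract: compare n bytes as unsigned values and return a negative, zero, or positive result according to the first pair of bytes that differs. A minimal C sketch of that contract (illustrative only, not taken from the kernel sources; the ARC version above additionally compares a word at a time when both buffers are 32-bit aligned):

#include <stddef.h>

/* Illustrative reference only: the byte-wise semantics the assembly
 * routines above implement. Not the kernel's implementation.
 */
static int memcmp_ref(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1, *b = s2;

	while (n--) {
		if (*a != *b)
			return *a - *b;	/* sign of first differing byte */
		a++;
		b++;
	}
	return 0;			/* all n bytes equal */
}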