/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/stxncpy.S
 * Contributed by Richard Henderson (rth@tamu.edu)
 *
 * Copy no more than COUNT bytes of the null-terminated string from
 * SRC to DST.
 *
 * This is an internal routine used by strncpy, stpncpy, and strncat.
 * As such, it uses special linkage conventions to make implementation
 * of these public functions more efficient.
 *
 * On input:
 *	t9 = return address
 *	a0 = DST
 *	a1 = SRC
 *	a2 = COUNT
 *
 * Furthermore, COUNT may not be zero.
 *
 * On output:
 *	t0  = last word written
 *	t10 = bitmask (with one bit set) indicating the byte position of
 *	      the end of the range specified by COUNT
 *	t12 = bitmask (with one bit set) indicating the last byte written
 *	a0  = unaligned address of the last *word* written
 *	a2  = the number of full words left in COUNT
 *
 * Furthermore, v0, a3-a5, t11, and $at are untouched.
 */

#include <asm/regdef.h>

	.set noat
	.set noreorder

	.text

/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
   doesn't like putting the entry point for a procedure somewhere in the
   middle of the procedure descriptor.  Work around this by putting the
   aligned copy in its own procedure descriptor */

	.ent stxncpy_aligned
	.align 3
stxncpy_aligned:
	.frame sp, 0, t9, 0
	.prologue 0

	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == the first source word.  */

	/* Create the 1st output word and detect 0's in the 1st input word.  */
	lda	t2, -1		# e1 : build a mask against false zero
	mskqh	t2, a1, t2	# e0 :   detection in the src word
	mskqh	t1, a1, t3	# e0 :
	ornot	t1, t2, t2	# .. e1 :
	mskql	t0, a1, t0	# e0 : assemble the first output word
	cmpbge	zero, t2, t8	# .. e1 : bits set iff null found
	or	t0, t3, t0	# e0 :
	beq	a2, $a_eoc	# .. e1 :
	bne	t8, $a_eos	# .. e1 :

	/* On entry to this basic block:
	   t0 == a source word not containing a null.  */

$a_loop:
	stq_u	t0, 0(a0)	# e0 :
	addq	a0, 8, a0	# .. e1 :
	ldq_u	t0, 0(a1)	# e0 :
	addq	a1, 8, a1	# .. e1 :
	subq	a2, 1, a2	# e0 :
	cmpbge	zero, t0, t8	# .. e1 (stall)
	beq	a2, $a_eoc	# e1 :
	beq	t8, $a_loop	# e1 :

	/* Take care of the final (partial) word store.  At this point
	   the end-of-count bit is set in t8 iff it applies.

	   On entry to this basic block we have:
	   t0 == the source word containing the null
	   t8 == the cmpbge mask that found it.  */

$a_eos:
	negq	t8, t12		# e0 : find low bit set
	and	t8, t12, t12	# e1 (stall)

	/* For the sake of the cache, don't read a destination word
	   if we're not going to need it.  */
	and	t12, 0x80, t6	# e0 :
	bne	t6, 1f		# .. e1 (zdb)

	/* We're doing a partial word store and so need to combine
	   our source and original destination words.  */
	ldq_u	t1, 0(a0)	# e0 :
	subq	t12, 1, t6	# .. e1 :
	or	t12, t6, t8	# e0 :
	unop			#
	zapnot	t0, t8, t0	# e0 : clear src bytes > null
	zap	t1, t8, t1	# .. e1 : clear dst bytes <= null
	or	t0, t1, t0	# e1 :

1:	stq_u	t0, 0(a0)	# e0 :
	ret	(t9)		# e1 :

	/* Add the end-of-count bit to the eos detection bitmask.  */
$a_eoc:
	or	t10, t8, t8
	br	$a_eos

	.end stxncpy_aligned

	.align 3
	.ent __stxncpy
	.globl __stxncpy
__stxncpy:
	.frame sp, 0, t9, 0
	.prologue 0

	/* Are source and destination co-aligned?  */
	xor	a0, a1, t1	# e0 :
	and	a0, 7, t0	# .. e1 : find dest misalignment
	and	t1, 7, t1	# e0 :
	addq	a2, t0, a2	# .. e1 : bias count by dest misalignment
	subq	a2, 1, a2	# e0 :
	and	a2, 7, t2	# e1 :
	srl	a2, 3, a2	# e0 : a2 = loop counter = (count - 1)/8
	addq	zero, 1, t10	# .. e1 :
	sll	t10, t2, t10	# e0 : t10 = bitmask of last count byte
	bne	t1, $unaligned	# .. e1 :

	/* We are co-aligned; take care of a partial first word.  */

	ldq_u	t1, 0(a1)	# e0 : load first src word
	addq	a1, 8, a1	# .. e1 :

	beq	t0, stxncpy_aligned     # avoid loading dest word if not needed
	ldq_u	t0, 0(a0)	# e0 :
	br	stxncpy_aligned	# .. e1 :


/* The source and destination are not co-aligned.  Align the destination
   and cope.  We have to be very careful about not reading too much and
   causing a SEGV.  */

	.align 3
$u_head:
	/* We know just enough now to be able to assemble the first
	   full source word.  We can still find a zero at the end of it
	   that prevents us from outputting the whole thing.

	   On entry to this basic block:
	   t0 == the first dest word, unmasked
	   t1 == the shifted low bits of the first source word
	   t6 == bytemask that is -1 in dest word bytes */

	ldq_u	t2, 8(a1)	# e0 : load second src word
	addq	a1, 8, a1	# .. e1 :
	mskql	t0, a0, t0	# e0 : mask trailing garbage in dst
	extqh	t2, a1, t4	# e0 :
	or	t1, t4, t1	# e1 : first aligned src word complete
	mskqh	t1, a0, t1	# e0 : mask leading garbage in src
	or	t0, t1, t0	# e0 : first output word complete
	or	t0, t6, t6	# e1 : mask original data for zero test
	cmpbge	zero, t6, t8	# e0 :
	beq	a2, $u_eocfin	# .. e1 :
	lda	t6, -1		# e0 :
	bne	t8, $u_final	# .. e1 :

	mskql	t6, a1, t6	# e0 : mask out the bits we have already seen
	nop			# .. e1 :
	stq_u	t0, 0(a0)	# e0 : store first output word
	or	t6, t2, t2	# .. e1 :
	cmpbge	zero, t2, t8	# e0 : find nulls in second partial
	addq	a0, 8, a0	# .. e1 :
	subq	a2, 1, a2	# e0 :
	bne	t8, $u_late_head_exit	# .. e1 :

	/* Finally, we've got all the stupid leading edge cases taken care
	   of and we can set up to enter the main loop.  */

	extql	t2, a1, t1	# e0 : position hi-bits of lo word
	beq	a2, $u_eoc	# .. e1 :
	ldq_u	t2, 8(a1)	# e0 : read next high-order source word
	addq	a1, 8, a1	# .. e1 :
	extqh	t2, a1, t0	# e0 : position lo-bits of hi word
	cmpbge	zero, t2, t8	# .. e1 :
	nop			# e0 :
	bne	t8, $u_eos	# .. e1 :

	/* Unaligned copy main loop.  In order to avoid reading too much,
	   the loop is structured to detect zeros in aligned source words.
	   This has, unfortunately, effectively pulled half of a loop
	   iteration out into the head and half into the tail, but it does
	   prevent nastiness from accumulating in the very thing we want
	   to run as fast as possible.

	   On entry to this basic block:
	   t0 == the shifted low-order bits from the current source word
	   t1 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word

	   We further know that t2 does not contain a null terminator.  */

	.align 3
$u_loop:
	or	t0, t1, t0	# e0 : current dst word now complete
	subq	a2, 1, a2	# .. e1 : decrement word count
	stq_u	t0, 0(a0)	# e0 : save the current word
	addq	a0, 8, a0	# .. e1 :
	extql	t2, a1, t1	# e0 : extract high bits for next time
	beq	a2, $u_eoc	# .. e1 :
	ldq_u	t2, 8(a1)	# e0 : load high word for next time
	addq	a1, 8, a1	# .. e1 :
	nop			# e0 :
	cmpbge	zero, t2, t8	# e1 : test new word for eos (stall)
	extqh	t2, a1, t0	# e0 : extract low bits for current word
	beq	t8, $u_loop	# .. e1 :

	/* We've found a zero somewhere in the source word we just read.
	   If it resides in the lower half, we have one (probably partial)
	   word to write out, and if it resides in the upper half, we
	   have one full and one partial word left to write out.

	   On entry to this basic block:
	   t0 == the shifted low-order bits from the current source word
	   t1 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word.  */
$u_eos:
	or	t0, t1, t0	# e0 : first (partial) source word complete
	nop			# .. e1 :
	cmpbge	zero, t0, t8	# e0 : is the null in this first bit?
	bne	t8, $u_final	# .. e1 (zdb)

	stq_u	t0, 0(a0)	# e0 : the null was in the high-order bits
	addq	a0, 8, a0	# .. e1 :
	subq	a2, 1, a2	# e1 :

$u_late_head_exit:
	extql	t2, a1, t0	# .. e0 :
	cmpbge	zero, t0, t8	# e0 :
	or	t8, t10, t6	# e1 :
	cmoveq	a2, t6, t8	# e0 :
	nop			# .. e1 :

	/* Take care of a final (probably partial) result word.
	   On entry to this basic block:
	   t0 == assembled source word
	   t8 == cmpbge mask that found the null.  */
$u_final:
	negq	t8, t6		# e0 : isolate low bit set
	and	t6, t8, t12	# e1 :

	and	t12, 0x80, t6	# e0 : avoid dest word load if we can
	bne	t6, 1f		# .. e1 (zdb)

	ldq_u	t1, 0(a0)	# e0 :
	subq	t12, 1, t6	# .. e1 :
	or	t6, t12, t8	# e0 :
	zapnot	t0, t8, t0	# .. e1 : kill source bytes > null
	zap	t1, t8, t1	# e0 : kill dest bytes <= null
	or	t0, t1, t0	# e1 :

1:	stq_u	t0, 0(a0)	# e0 :
	ret	(t9)		# .. e1 :

	/* Got to end-of-count before end of string.
	   On entry to this basic block:
	   t1 == the shifted high-order bits from the previous source word  */
$u_eoc:
	and	a1, 7, t6	# e1 :
	sll	t10, t6, t6	# e0 :
	and	t6, 0xff, t6	# e0 :
	bne	t6, 1f		# .. e1 :

	ldq_u	t2, 8(a1)	# e0 : load final src word
	nop			# .. e1 :
	extqh	t2, a1, t0	# e0 : extract high bits for last word
	or	t1, t0, t1	# e1 :

1:	cmpbge	zero, t1, t8
	mov	t1, t0

$u_eocfin:			# end-of-count, final word
	or	t10, t8, t8
	br	$u_final

	/* Unaligned copy entry point.  */
	.align 3
$unaligned:

	ldq_u	t1, 0(a1)	# e0 : load first source word

	and	a0, 7, t4	# .. e1 : find dest misalignment
	and	a1, 7, t5	# e0 : find src misalignment

	/* Conditionally load the first destination word and a bytemask
	   with 0xff indicating that the destination byte is sacrosanct.  */

	mov	zero, t0	# .. e1 :
	mov	zero, t6	# e0 :
	beq	t4, 1f		# .. e1 :
	ldq_u	t0, 0(a0)	# e0 :
	lda	t6, -1		# .. e1 :
	mskql	t6, a0, t6	# e0 :
	subq	a1, t4, a1	# .. e1 : sub dest misalignment from src addr

	/* If source misalignment is larger than dest misalignment, we need
	   extra startup checks to avoid SEGV.  */

1:	cmplt	t4, t5, t12	# e1 :
	extql	t1, a1, t1	# .. e0 : shift src into place
	lda	t2, -1		# e0 : for creating masks later
	beq	t12, $u_head	# .. e1 :

	extql	t2, a1, t2	# e0 :
	cmpbge	zero, t1, t8	# .. e1 : is there a zero?
	andnot	t2, t6, t2	# e0 : dest mask for a single-word copy
	or	t8, t10, t5	# .. e1 : test for end-of-count too
	cmpbge	zero, t2, t3	# e0 :
	cmoveq	a2, t5, t8	# .. e1 :
	andnot	t8, t3, t8	# e0 :
	beq	t8, $u_head	# .. e1 (zdb)

	/* At this point we've found a zero in the first partial word of
	   the source.  We need to isolate the valid source data and mask
	   it into the original destination data.  (Incidentally, we know
	   that we'll need at least one byte of that original dest word.)  */

	ldq_u	t0, 0(a0)	# e0 :
	negq	t8, t6		# .. e1 : build bitmask of bytes <= zero
	mskqh	t1, t4, t1	# e0 :
	and	t6, t8, t12	# .. e1 :
	subq	t12, 1, t6	# e0 :
	or	t6, t12, t8	# e1 :

	zapnot	t2, t8, t2	# e0 : prepare source word; mirror changes
	zapnot	t1, t8, t1	# .. e1 : to source validity mask

	andnot	t0, t2, t0	# e0 : zero place for source word
	or	t0, t1, t0	# e1 : and put it there
	stq_u	t0, 0(a0)	# e0 :
	ret	(t9)		# .. e1 :

	.end __stxncpy
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.