/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/ev6-stxcpy.S
 * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Copy a null-terminated string from SRC to DST.
 *
 * This is an internal routine used by strcpy, stpcpy, and strcat.
 * As such, it uses special linkage conventions to make implementation
 * of these public functions more efficient.
 *
 * On input:
 *	t9 = return address
 *	a0 = DST
 *	a1 = SRC
 *
 * On output:
 *	t12 = bitmask (with one bit set) indicating the last byte written
 *	a0  = unaligned address of the last *word* written
 *
 * Furthermore, v0, a3-a5, t11, and t12 are untouched.
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 * Try not to change the actual algorithm if possible for consistency.
 */

#include <asm/regdef.h>

	.set noat
	.set noreorder

	.text

/* There is a problem with either gdb (as of 4.16) or gas (as of 2.7) that
   doesn't like putting the entry point for a procedure somewhere in the
   middle of the procedure descriptor.  Work around this by putting the
   aligned copy in its own procedure descriptor.  */


	.ent stxcpy_aligned
	.align 4
stxcpy_aligned:
	.frame sp, 0, t9
	.prologue 0

	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == the first source word.  */

	/* Create the 1st output word and detect 0's in the 1st input word.  */
	lda	t2, -1		# E : build a mask against false zero
	mskqh	t2, a1, t2	# U :   detection in the src word (stall)
	mskqh	t1, a1, t3	# U :
	ornot	t1, t2, t2	# E : (stall)

	mskql	t0, a1, t0	# U : assemble the first output word
	cmpbge	zero, t2, t8	# E : bits set iff null found
	or	t0, t3, t1	# E : (stall)
	bne	t8, $a_eos	# U : (stall)

	/* On entry to this basic block:
	   t0 == the first destination word for masking back in
	   t1 == a source word not containing a null.  */
	/* Nops here to separate store quads from load quads */

$a_loop:
	stq_u	t1, 0(a0)	# L :
	addq	a0, 8, a0	# E :
	nop
	nop

	ldq_u	t1, 0(a1)	# L : Latency=3
	addq	a1, 8, a1	# E :
	cmpbge	zero, t1, t8	# E : (3 cycle stall)
	beq	t8, $a_loop	# U : (stall for t8)

	/* Take care of the final (partial) word store.
	   On entry to this basic block we have:
	   t1 == the source word containing the null
	   t8 == the cmpbge mask that found it.  */
$a_eos:
	negq	t8, t6		# E : find low bit set
	and	t8, t6, t12	# E : (stall)
	/* For the sake of the cache, don't read a destination word
	   if we're not going to need it.  */
	and	t12, 0x80, t6	# E : (stall)
	bne	t6, 1f		# U : (stall)

	/* We're doing a partial word store and so need to combine
	   our source and original destination words.  */
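	/* t12 has a single bit set at the null's byte position (computed
	   above as t8 & -t8).  Below, t12 - 1 selects the bytes before the
	   null and t12 | (t12 - 1) the bytes up to and including it: zapnot
	   keeps only the source bytes before the null, zap clears those
	   positions (and the null's) in the destination, and the final OR
	   merges the two.  */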
	ldq_u	t0, 0(a0)	# L : Latency=3
	subq	t12, 1, t6	# E :
	zapnot	t1, t6, t1	# U : clear src bytes >= null (stall)
	or	t12, t6, t8	# E : (stall)

	zap	t0, t8, t0	# E : clear dst bytes <= null
	or	t0, t1, t1	# E : (stall)
	nop
	nop

1:	stq_u	t1, 0(a0)	# L :
	ret	(t9)		# L0 : Latency=3
	nop
	nop

	.end stxcpy_aligned

	.align 4
	.ent __stxcpy
	.globl __stxcpy
__stxcpy:
	.frame sp, 0, t9
	.prologue 0

	/* Are source and destination co-aligned?  */
	xor	a0, a1, t0	# E :
	unop			# E :
	and	t0, 7, t0	# E : (stall)
	bne	t0, $unaligned	# U : (stall)

	/* We are co-aligned; take care of a partial first word.  */
	ldq_u	t1, 0(a1)	# L : load first src word
	and	a0, 7, t0	# E : take care not to load a word ...
	addq	a1, 8, a1	# E :
	beq	t0, stxcpy_aligned	# U : ... if we won't need it (stall)

	ldq_u	t0, 0(a0)	# L :
	br	stxcpy_aligned	# L0 : Latency=3
	nop
	nop


/* The source and destination are not co-aligned.  Align the destination
   and cope.  We have to be very careful about not reading too much and
   causing a SEGV.  */

	.align 4
$u_head:
	/* We know just enough now to be able to assemble the first
	   full source word.  We can still find a zero at the end of it
	   that prevents us from outputting the whole thing.

	   On entry to this basic block:
	   t0 == the first dest word, for masking back in, if needed else 0
	   t1 == the low bits of the first source word
	   t6 == bytemask that is -1 in dest word bytes */

	ldq_u	t2, 8(a1)	# L :
	addq	a1, 8, a1	# E :
	extql	t1, a1, t1	# U : (stall on a1)
	extqh	t2, a1, t4	# U : (stall on a1)

	mskql	t0, a0, t0	# U :
	or	t1, t4, t1	# E :
	mskqh	t1, a0, t1	# U : (stall on t1)
	or	t0, t1, t1	# E : (stall on t1)

	or	t1, t6, t6	# E :
	cmpbge	zero, t6, t8	# E : (stall)
	lda	t6, -1		# E : for masking just below
	bne	t8, $u_final	# U : (stall)

	mskql	t6, a1, t6	# U : mask out the bits we have
	or	t6, t2, t2	# E :   already extracted before (stall)
	cmpbge	zero, t2, t8	# E :   testing eos (stall)
	bne	t8, $u_late_head_exit	# U : (stall)

	/* Finally, we've got all the stupid leading edge cases taken care
	   of and we can set up to enter the main loop.  */

	stq_u	t1, 0(a0)	# L : store first output word
	addq	a0, 8, a0	# E :
	extql	t2, a1, t0	# U : position ho-bits of lo word
	ldq_u	t2, 8(a1)	# L : read next high-order source word

	addq	a1, 8, a1	# E :
	cmpbge	zero, t2, t8	# E : (stall for t2)
	nop			# E :
	bne	t8, $u_eos	# U : (stall)

	/* Unaligned copy main loop.  In order to avoid reading too much,
	   the loop is structured to detect zeros in aligned source words.
	   This has, unfortunately, effectively pulled half of a loop
	   iteration out into the head and half into the tail, but it does
	   prevent nastiness from accumulating in the very thing we want
	   to run as fast as possible.

	   On entry to this basic block:
	   t0 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word

	   We further know that t2 does not contain a null terminator.  */
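	/* In the loop below, extqh positions the bytes of t2 that belong in
	   the upper part of the output word being assembled, while t0 already
	   holds the lower part carried over from the previous source word;
	   extql then saves t2's remaining bytes for the next output word.
	   Only the unshifted, newly loaded word is tested for a null, so the
	   loop never reads past the source word holding the terminator.  */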

	.align 3
$u_loop:
	extqh	t2, a1, t1	# U : extract high bits for current word
	addq	a1, 8, a1	# E : (stall)
	extql	t2, a1, t3	# U : extract low bits for next time (stall)
	addq	a0, 8, a0	# E :

	or	t0, t1, t1	# E : current dst word now complete
	ldq_u	t2, 0(a1)	# L : Latency=3 load high word for next time
	stq_u	t1, -8(a0)	# L : save the current word (stall)
	mov	t3, t0		# E :

	cmpbge	zero, t2, t8	# E : test new word for eos
	beq	t8, $u_loop	# U : (stall)
	nop
	nop

	/* We've found a zero somewhere in the source word we just read.
	   If it resides in the lower half, we have one (probably partial)
	   word to write out, and if it resides in the upper half, we
	   have one full and one partial word left to write out.

	   On entry to this basic block:
	   t0 == the shifted high-order bits from the previous source word
	   t2 == the unshifted current source word.  */
$u_eos:
	extqh	t2, a1, t1	# U :
	or	t0, t1, t1	# E : first (partial) source word complete (stall)
	cmpbge	zero, t1, t8	# E : is the null in this first word? (stall)
	bne	t8, $u_final	# U : (stall)

$u_late_head_exit:
	stq_u	t1, 0(a0)	# L : the null was in the high-order bits
	addq	a0, 8, a0	# E :
	extql	t2, a1, t1	# U :
	cmpbge	zero, t1, t8	# E : (stall)

	/* Take care of a final (probably partial) result word.
	   On entry to this basic block:
	   t1 == assembled source word
	   t8 == cmpbge mask that found the null.  */
$u_final:
	negq	t8, t6		# E : isolate low bit set
	and	t6, t8, t12	# E : (stall)
	and	t12, 0x80, t6	# E : avoid dest word load if we can (stall)
	bne	t6, 1f		# U : (stall)

	ldq_u	t0, 0(a0)	# L :
	subq	t12, 1, t6	# E :
	or	t6, t12, t8	# E : (stall)
	zapnot	t1, t6, t1	# U : kill source bytes >= null (stall)

	zap	t0, t8, t0	# U : kill dest bytes <= null (2 cycle data stall)
	or	t0, t1, t1	# E : (stall)
	nop
	nop

1:	stq_u	t1, 0(a0)	# L :
	ret	(t9)		# L0 : Latency=3
	nop
	nop

	/* Unaligned copy entry point.  */
	.align 4
$unaligned:

	ldq_u	t1, 0(a1)	# L : load first source word
	and	a0, 7, t4	# E : find dest misalignment
	and	a1, 7, t5	# E : find src misalignment
	/* Conditionally load the first destination word and a bytemask
	   with 0xff indicating that the destination byte is sacrosanct.  */
	mov	zero, t0	# E :

	mov	zero, t6	# E :
	beq	t4, 1f		# U :
	ldq_u	t0, 0(a0)	# L :
	lda	t6, -1		# E :

	mskql	t6, a0, t6	# U :
	nop
	nop
	nop
1:
	subq	a1, t4, a1	# E : sub dest misalignment from src addr
	/* If source misalignment is larger than dest misalignment, we need
	   extra startup checks to avoid SEGV.  */
	cmplt	t4, t5, t12	# E :
	beq	t12, $u_head	# U :
	lda	t2, -1		# E : mask out leading garbage in source

	mskqh	t2, t5, t2	# U :
	ornot	t1, t2, t3	# E : (stall)
	cmpbge	zero, t3, t8	# E : is there a zero? (stall)
	beq	t8, $u_head	# U : (stall)

	/* At this point we've found a zero in the first partial word of
	   the source.  We need to isolate the valid source data and mask
	   it into the original destination data.  (Incidentally, we know
	   that we'll need at least one byte of that original dest word.)  */
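	/* The cmpbge mask t8 is expressed in byte positions of the aligned
	   *source* word.  After the null's bit is isolated in t12 below,
	   shifting it right by the residual source offset (a1 & 7, one mask
	   bit per byte) re-expresses the null's position relative to the
	   destination word, which is the form callers expect in t12.  */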

	ldq_u	t0, 0(a0)	# L :
	negq	t8, t6		# E : build bitmask of bytes <= zero
	and	t6, t8, t12	# E : (stall)
	and	a1, 7, t5	# E :

	subq	t12, 1, t6	# E :
	or	t6, t12, t8	# E : (stall)
	srl	t12, t5, t12	# U : adjust final null return value
	zapnot	t2, t8, t2	# U : prepare source word; mirror changes (stall)

	and	t1, t2, t1	# E : to source validity mask
	extql	t2, a1, t2	# U :
	extql	t1, a1, t1	# U : (stall)
	andnot	t0, t2, t0	# E : zero place for source to reside (stall)

	or	t0, t1, t1	# E : and put it there
	stq_u	t1, 0(a0)	# L : (stall)
	ret	(t9)		# L0 : Latency=3
	nop

	.end __stxcpy
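
/* A note on the linkage described in the header above: t12 comes back with
   exactly one bit set, and its index is the terminating null's byte offset
   within the (aligned) last word written.  A caller such as stpcpy can
   therefore recover the address of the null as (a0 & ~7) plus that bit
   index.  */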