// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/lib/memcpy.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This is a reasonably optimized memcpy() routine.
 */

/*
 * Note that the C code is written to be optimized into good assembly. However,
 * at this point gcc is unable to sanely compile "if (n >= 0)", resulting in an
 * explicit compare against 0 (instead of just using the proper "blt reg, xx" or
 * "bge reg, xx"). I hope alpha-gcc will be fixed to notice this eventually..
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/string.h>

/*
 * This should be done in one go with ldq_u*2/mask/stq_u. Do it
 * with a macro so that we can fix it up later..
 */
#define ALIGN_DEST_TO8_UP(d,s,n) \
	while (d & 7) { \
		if (n <= 0) return; \
		n--; \
		*(char *) d = *(char *) s; \
		d++; s++; \
	}
#define ALIGN_DEST_TO8_DN(d,s,n) \
	while (d & 7) { \
		if (n <= 0) return; \
		n--; \
		d--; s--; \
		*(char *) d = *(char *) s; \
	}

/*
 * This should similarly be done with ldq_u*2/mask/stq_u. The destination
 * is aligned, but we don't fill in a full quad-word.
 */
#define DO_REST_UP(d,s,n) \
	while (n > 0) { \
		n--; \
		*(char *) d = *(char *) s; \
		d++; s++; \
	}
#define DO_REST_DN(d,s,n) \
	while (n > 0) { \
		n--; \
		d--; s--; \
		*(char *) d = *(char *) s; \
	}

/*
 * This should be done with ldq/mask/stq. The source and destination are
 * aligned, but we don't fill in a full quad-word.
 */
#define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n)
#define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n)

/*
 * This does unaligned memory copies. We want to avoid storing to
 * an unaligned address, as that would do a read-modify-write cycle.
 * We also want to avoid double-reading the unaligned reads.
 *
 * Note the ordering to try to avoid load (and address generation) latencies.
 */
static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s,
					  long n)
{
	ALIGN_DEST_TO8_UP(d,s,n);
	n -= 8;			/* to avoid compare against 8 in the loop */
	if (n >= 0) {
		unsigned long low_word, high_word;
		__asm__("ldq_u %0,%1":"=r" (low_word):"m" (*(unsigned long *) s));
		do {
			unsigned long tmp;
			__asm__("ldq_u %0,%1":"=r" (high_word):"m" (*(unsigned long *)(s+8)));
			n -= 8;
			__asm__("extql %1,%2,%0"
				:"=r" (low_word)
				:"r" (low_word), "r" (s));
			__asm__("extqh %1,%2,%0"
				:"=r" (tmp)
				:"r" (high_word), "r" (s));
			s += 8;
			*(unsigned long *) d = low_word | tmp;
			d += 8;
			low_word = high_word;
		} while (n >= 0);
	}
	n += 8;
	DO_REST_UP(d,s,n);
}

static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s,
					  long n)
{
	/* I don't understand AXP assembler well enough for this. -Tim */
	s += n;
	d += n;
	while (n--)
		* (char *) --d = * (char *) --s;
}

/*
 * Hmm.. Strange. The __asm__ here is there to make gcc use an integer register
 * for the load-store. I don't know why, but it would seem that using a floating
 * point register for the move seems to slow things down (very small difference,
 * though).
 *
 * Note the ordering to try to avoid load (and address generation) latencies.
 */
static inline void __memcpy_aligned_up (unsigned long d, unsigned long s,
					long n)
{
	ALIGN_DEST_TO8_UP(d,s,n);
	n -= 8;
	while (n >= 0) {
		unsigned long tmp;
		__asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
		n -= 8;
		s += 8;
		*(unsigned long *) d = tmp;
		d += 8;
	}
	n += 8;
	DO_REST_ALIGNED_UP(d,s,n);
}
static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s,
					long n)
{
	s += n;
	d += n;
	ALIGN_DEST_TO8_DN(d,s,n);
	n -= 8;
	while (n >= 0) {
		unsigned long tmp;
		s -= 8;
		__asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
		n -= 8;
		d -= 8;
		*(unsigned long *) d = tmp;
	}
	n += 8;
	DO_REST_ALIGNED_DN(d,s,n);
}

#undef memcpy

void * memcpy(void * dest, const void *src, size_t n)
{
	if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
		__memcpy_aligned_up ((unsigned long) dest, (unsigned long) src,
				     n);
		return dest;
	}
	__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
	return dest;
}
EXPORT_SYMBOL(memcpy);
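
For readers unfamiliar with the Alpha byte-extraction instructions, the following is a minimal user-space sketch of the technique used in __memcpy_unaligned_up() above, assuming a little-endian 64-bit machine. The function name and the preconditions (d is 8-byte aligned, s is misaligned by a nonzero amount, n is a multiple of 8) are assumptions of this example, not part of the kernel file. Each destination quad-word is assembled from two aligned source loads that are shifted and OR-ed together, which is what extql/extqh do in hardware, so no source quad-word is read twice and every store is aligned.

#include <stdint.h>
#include <stddef.h>

/* Preconditions (assumed): d 8-byte aligned, (s & 7) != 0, n % 8 == 0. */
static void unaligned_copy_up_sketch(unsigned char *d,
				     const unsigned char *s, size_t n)
{
	unsigned int shift = (uintptr_t) s & 7;	/* misalignment: 1..7 bytes */
	const uint64_t *sq = (const uint64_t *)(s - shift);	/* aligned base */
	uint64_t low = *sq++;	/* corresponds to the priming ldq_u */

	while (n >= 8) {
		/*
		 * Like ldq_u, this load may fetch up to 7 bytes past s + n
		 * on the last iteration; it stays within one aligned
		 * quad-word (so it cannot cross a page), but it is not
		 * strictly conforming C.
		 */
		uint64_t high = *sq++;

		/* extql keeps the high bytes of low, extqh the low bytes of high */
		*(uint64_t *) d = (low >> (8 * shift)) |
				  (high << (8 * (8 - shift)));
		low = high;
		d += 8;
		n -= 8;
	}
}

The nonzero-misalignment precondition mirrors the dispatch in memcpy() above: when ((dest ^ src) & 7) is zero the two pointers share their low three bits, so aligning the destination to 8 bytes also aligns the source, and the fully aligned path is taken instead.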