/* SPDX-License-Identifier: GPL-2.0 */
/* copy_user.S: Sparc optimized copy_from_user and copy_to_user code.
 *
 * Copyright(C) 1995 Linus Torvalds
 * Copyright(C) 1996 David S. Miller
 * Copyright(C) 1996 Eddie C. Dost
 * Copyright(C) 1996,1998 Jakub Jelinek
 *
 * derived from:
 *	e-mail between David and Eddie.
 *
 * Returns 0 if successful, otherwise count of bytes not copied yet
 */

#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/thread_info.h>

/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr

#define EX_ENTRY(l1, l2)			\
	.section __ex_table,ALLOC;		\
	.align	4;				\
	.word	l1, l2;				\
	.text;

#define EX(x,y,a,b)				\
98:	x,y;					\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	retl;					\
	 a, b, %o0;				\
	EX_ENTRY(98b, 99b)

#define EX2(x,y,c,d,e,a,b)			\
98:	x,y;					\
	.section .fixup,ALLOC,EXECINSTR;	\
	.align	4;				\
99:	c, d, e;				\
	retl;					\
	 a, b, %o0;				\
	EX_ENTRY(98b, 99b)

#define EXO2(x,y)				\
98:	x, y;					\
	EX_ENTRY(98b, 97f)

#define LD(insn, src, offset, reg, label)	\
98:	insn [%src + (offset)], %reg;		\
	.section .fixup,ALLOC,EXECINSTR;	\
99:	ba label;				\
	 mov offset, %g5;			\
	EX_ENTRY(98b, 99b)

#define ST(insn, dst, offset, reg, label)	\
98:	insn %reg, [%dst + (offset)];		\
	.section .fixup,ALLOC,EXECINSTR;	\
99:	ba label;				\
	 mov offset, %g5;			\
	EX_ENTRY(98b, 99b)
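/*
 * Exception handling: each EX/EX2/EXO2/LD/ST use wraps one user access
 * and records a __ex_table entry pairing the access (98:) with fixup
 * code.  On a fault the fixup must return from __copy_user with the
 * number of bytes not yet copied in %o0.  EX and EX2 compute that count
 * inline from the expression passed in; EXO2 defers to the common fixup
 * at 97: near the end of this file, which returns the untouched length
 * still held in %o2; LD and ST branch to a per-chunk *_fault handler
 * with the faulting offset within the chunk in %g5.
 */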
/* Both these macros have to start with exactly the same insn */
/* left: g7 + (g1 % 128) - offset */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	LD(ldd, src, offset + 0x00, t0, bigchunk_fault)	\
	LD(ldd, src, offset + 0x08, t2, bigchunk_fault)	\
	LD(ldd, src, offset + 0x10, t4, bigchunk_fault)	\
	LD(ldd, src, offset + 0x18, t6, bigchunk_fault)	\
	ST(st, dst, offset + 0x00, t0, bigchunk_fault)	\
	ST(st, dst, offset + 0x04, t1, bigchunk_fault)	\
	ST(st, dst, offset + 0x08, t2, bigchunk_fault)	\
	ST(st, dst, offset + 0x0c, t3, bigchunk_fault)	\
	ST(st, dst, offset + 0x10, t4, bigchunk_fault)	\
	ST(st, dst, offset + 0x14, t5, bigchunk_fault)	\
	ST(st, dst, offset + 0x18, t6, bigchunk_fault)	\
	ST(st, dst, offset + 0x1c, t7, bigchunk_fault)

/* left: g7 + (g1 % 128) - offset */
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	LD(ldd, src, offset + 0x00, t0, bigchunk_fault)	\
	LD(ldd, src, offset + 0x08, t2, bigchunk_fault)	\
	LD(ldd, src, offset + 0x10, t4, bigchunk_fault)	\
	LD(ldd, src, offset + 0x18, t6, bigchunk_fault)	\
	ST(std, dst, offset + 0x00, t0, bigchunk_fault)	\
	ST(std, dst, offset + 0x08, t2, bigchunk_fault)	\
	ST(std, dst, offset + 0x10, t4, bigchunk_fault)	\
	ST(std, dst, offset + 0x18, t6, bigchunk_fault)

	.section .fixup,#alloc,#execinstr
bigchunk_fault:
	sub %g7, %g5, %o0
	and %g1, 127, %g1
	retl
	 add %o0, %g1, %o0

/* left: offset + 16 + (g1 % 16) */
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault)	\
	LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault)	\
	ST(st, dst, -(offset + 0x10), t0, lastchunk_fault)	\
	ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault)	\
	ST(st, dst, -(offset + 0x08), t2, lastchunk_fault)	\
	ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)

	.section .fixup,#alloc,#execinstr
lastchunk_fault:
	and %g1, 15, %g1
	retl
	 sub %g1, %g5, %o0

/* left: o3 + (o2 % 16) - offset */
#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
	LD(lduh, src, offset + 0x00, t0, halfchunk_fault)	\
	LD(lduh, src, offset + 0x02, t1, halfchunk_fault)	\
	LD(lduh, src, offset + 0x04, t2, halfchunk_fault)	\
	LD(lduh, src, offset + 0x06, t3, halfchunk_fault)	\
	ST(sth, dst, offset + 0x00, t0, halfchunk_fault)	\
	ST(sth, dst, offset + 0x02, t1, halfchunk_fault)	\
	ST(sth, dst, offset + 0x04, t2, halfchunk_fault)	\
	ST(sth, dst, offset + 0x06, t3, halfchunk_fault)

/* left: o3 + (o2 % 16) + offset + 2 */
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault)	\
	LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault)	\
	ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault)	\
	ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)

	.section .fixup,#alloc,#execinstr
halfchunk_fault:
	and %o2, 15, %o2
	sub %o3, %g5, %o3
	retl
	 add %o2, %o3, %o0

/* left: offset + 2 + (o2 % 2) */
#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
	LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault)	\
	LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault)	\
	ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault)	\
	ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)

	.section .fixup,#alloc,#execinstr
last_shortchunk_fault:
	and %o2, 1, %o2
	retl
	 sub %o2, %g5, %o0

	.text
	.align	4

	.globl	__copy_user_begin
__copy_user_begin:

	.globl	__copy_user
	EXPORT_SYMBOL(__copy_user)
dword_align:
	andcc	%o1, 1, %g0
	be	4f
	 andcc	%o1, 2, %g0

	EXO2(ldub [%o1], %g2)
	add	%o1, 1, %o1
	EXO2(stb %g2, [%o0])
	sub	%o2, 1, %o2
	bne	3f
	 add	%o0, 1, %o0

	EXO2(lduh [%o1], %g2)
	add	%o1, 2, %o1
	EXO2(sth %g2, [%o0])
	sub	%o2, 2, %o2
	b	3f
	 add	%o0, 2, %o0
4:
	EXO2(lduh [%o1], %g2)
	add	%o1, 2, %o1
	EXO2(sth %g2, [%o0])
	sub	%o2, 2, %o2
	b	3f
	 add	%o0, 2, %o0

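/*
 * Main entry.  If src and dst differ in alignment modulo 4 the copy
 * cannot use word accesses and drops to cannot_optimize (halfword or
 * byte loops).  Lengths of 15 bytes or less go to short_aligned_end.
 * Otherwise the source is word/dword aligned first (dword_align),
 * 128-byte blocks are moved with MOVE_BIGCHUNK - or MOVE_BIGALIGNCHUNK
 * via ldd_std when the destination is doubleword aligned too - and the
 * remaining multiple of 16 bytes is handled by a computed jump into the
 * MOVE_LASTCHUNK table (each 16 data bytes take six instructions of
 * table code, hence the %g7 + %g7/2 offset).  copy_user_last7 finishes
 * the last 0-7 bytes.
 */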
__copy_user:	/* %o0=dst %o1=src %o2=len */
	xor	%o0, %o1, %o4
1:
	andcc	%o4, 3, %o5
2:
	bne	cannot_optimize
	 cmp	%o2, 15

	bleu	short_aligned_end
	 andcc	%o1, 3, %g0

	bne	dword_align
3:
	 andcc	%o1, 4, %g0

	be	2f
	 mov	%o2, %g1

	EXO2(ld [%o1], %o4)
	sub	%g1, 4, %g1
	EXO2(st %o4, [%o0])
	add	%o1, 4, %o1
	add	%o0, 4, %o0
2:
	andcc	%g1, 0xffffff80, %g7
	be	3f
	 andcc	%o0, 4, %g0

	be	ldd_std + 4
5:
	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc	%g7, 128, %g7
	add	%o1, 128, %o1
	bne	5b
	 add	%o0, 128, %o0
3:
	andcc	%g1, 0x70, %g7
	be	copy_user_table_end
	 andcc	%g1, 8, %g0

	sethi	%hi(copy_user_table_end), %o5
	srl	%g7, 1, %o4
	add	%g7, %o4, %o4
	add	%o1, %g7, %o1
	sub	%o5, %o4, %o5
	jmpl	%o5 + %lo(copy_user_table_end), %g0
	 add	%o0, %g7, %o0

	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
copy_user_table_end:
	be	copy_user_last7
	 andcc	%g1, 4, %g0

	EX(ldd	[%o1], %g2, and %g1, 0xf)
	add	%o0, 8, %o0
	add	%o1, 8, %o1
	EX(st	%g2, [%o0 - 0x08], and %g1, 0xf)
	EX2(st	%g3, [%o0 - 0x04], and %g1, 0xf, %g1, sub %g1, 4)
copy_user_last7:
	be	1f
	 andcc	%g1, 2, %g0

	EX(ld	[%o1], %g2, and %g1, 7)
	add	%o1, 4, %o1
	EX(st	%g2, [%o0], and %g1, 7)
	add	%o0, 4, %o0
1:
	be	1f
	 andcc	%g1, 1, %g0

	EX(lduh	[%o1], %g2, and %g1, 3)
	add	%o1, 2, %o1
	EX(sth	%g2, [%o0], and %g1, 3)
	add	%o0, 2, %o0
1:
	be	1f
	 nop

	EX(ldub	[%o1], %g2, add %g0, 1)
	EX(stb	%g2, [%o0], add %g0, 1)
1:
	retl
	 clr	%o0

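/*
 * Doubleword-aligned variant of the 128-byte loop.  It is entered from
 * above with "be ldd_std + 4": the first ldd of MOVE_BIGCHUNK has
 * already been issued in the branch delay slot, and since both chunk
 * macros start with exactly the same instruction, skipping the first
 * instruction of MOVE_BIGALIGNCHUNK keeps the two paths equivalent.
 */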
ldd_std:
	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc	%g7, 128, %g7
	add	%o1, 128, %o1
	bne	ldd_std
	 add	%o0, 128, %o0

	andcc	%g1, 0x70, %g7
	be	copy_user_table_end
	 andcc	%g1, 8, %g0

	sethi	%hi(copy_user_table_end), %o5
	srl	%g7, 1, %o4
	add	%g7, %o4, %o4
	add	%o1, %g7, %o1
	sub	%o5, %o4, %o5
	jmpl	%o5 + %lo(copy_user_table_end), %g0
	 add	%o0, %g7, %o0

cannot_optimize:
	bleu	short_end
	 cmp	%o5, 2

	bne	byte_chunk
	 and	%o2, 0xfffffff0, %o3

	andcc	%o1, 1, %g0
	be	10f
	 nop

	EXO2(ldub [%o1], %g2)
	add	%o1, 1, %o1
	EXO2(stb %g2, [%o0])
	sub	%o2, 1, %o2
	andcc	%o2, 0xfffffff0, %o3
	be	short_end
	 add	%o0, 1, %o0
10:
	MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
	MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
	subcc	%o3, 0x10, %o3
	add	%o1, 0x10, %o1
	bne	10b
	 add	%o0, 0x10, %o0
	b	2f
	 and	%o2, 0xe, %o3

byte_chunk:
	MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
	subcc	%o3, 0x10, %o3
	add	%o1, 0x10, %o1
	bne	byte_chunk
	 add	%o0, 0x10, %o0

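/*
 * Tail of the unaligned paths.  %o3 holds the remaining even byte count
 * (%o2 & 0xe); each MOVE_LAST_SHORTCHUNK entry is four instructions (16
 * bytes of code) for two bytes of data, so the jump lands %o3 * 8 bytes
 * before short_table_end.  The jmpl delay slot tests the low bit of %o2
 * so that short_table_end can copy a final odd byte.
 */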
short_end:
	and	%o2, 0xe, %o3
2:
	sethi	%hi(short_table_end), %o5
	sll	%o3, 3, %o4
	add	%o0, %o3, %o0
	sub	%o5, %o4, %o5
	add	%o1, %o3, %o1
	jmpl	%o5 + %lo(short_table_end), %g0
	 andcc	%o2, 1, %g0
	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
	MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
	MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
	MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
	MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
	MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
short_table_end:
	be	1f
	 nop
	EX(ldub	[%o1], %g2, add %g0, 1)
	EX(stb	%g2, [%o0], add %g0, 1)
1:
	retl
	 clr	%o0

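/*
 * Short copies (len <= 15) with src and dst equally aligned modulo 4.
 * If the source is not word aligned fall back to short_end; otherwise
 * copy an 8-byte piece here when bit 3 of the length is set and let
 * copy_user_last7 finish the 4/2/1-byte tail.
 */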
short_aligned_end:
	bne	short_end
	 andcc	%o2, 8, %g0

	be	1f
	 andcc	%o2, 4, %g0

	EXO2(ld	[%o1 + 0x00], %g2)
	EXO2(ld	[%o1 + 0x04], %g3)
	add	%o1, 8, %o1
	EXO2(st	%g2, [%o0 + 0x00])
	EX(st	%g3, [%o0 + 0x04], sub %o2, 4)
	add	%o0, 8, %o0
1:
	b	copy_user_last7
	 mov	%o2, %g1

	.section .fixup,#alloc,#execinstr
	.align	4
97:
	retl
	 mov	%o2, %o0

	.globl	__copy_user_end
__copy_user_end: