TOMOYO Linux Cross Reference
Linux/arch/sparc/lib/NG2memcpy.S

Diff markup

Differences between /arch/sparc/lib/NG2memcpy.S (Version linux-6.11-rc3) and /arch/sparc/lib/NG2memcpy.S (Version linux-2.6.32.71)


  1 /* SPDX-License-Identifier: GPL-2.0 */         << 
  2 /* NG2memcpy.S: Niagara-2 optimized memcpy.         1 /* NG2memcpy.S: Niagara-2 optimized memcpy.
  3  *                                                  2  *
  4  * Copyright (C) 2007 David S. Miller (davem@d      3  * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
  5  */                                                 4  */
  6                                                     5 
  7 #ifdef __KERNEL__                                   6 #ifdef __KERNEL__
  8 #include <linux/linkage.h>                     << 
  9 #include <asm/visasm.h>                             7 #include <asm/visasm.h>
 10 #include <asm/asi.h>                                8 #include <asm/asi.h>
 11 #define GLOBAL_SPARE    %g7                         9 #define GLOBAL_SPARE    %g7
 12 #else                                              10 #else
 13 #define ASI_PNF 0x82                               11 #define ASI_PNF 0x82
 14 #define ASI_BLK_P 0xf0                             12 #define ASI_BLK_P 0xf0
 15 #define ASI_BLK_INIT_QUAD_LDD_P 0xe2               13 #define ASI_BLK_INIT_QUAD_LDD_P 0xe2
 16 #define FPRS_FEF  0x04                             14 #define FPRS_FEF  0x04
 17 #ifdef MEMCPY_DEBUG                                15 #ifdef MEMCPY_DEBUG
 18 #define VISEntryHalf rd %fprs, %o5; wr %g0, FP     16 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
 19                      clr %g1; clr %g2; clr %g3 !!  17                      clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
 20 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr     18 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
 21 #else                                              19 #else
 22 #define VISEntryHalf rd %fprs, %o5; wr %g0, FP     20 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
 23 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr     21 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
 24 #endif                                             22 #endif
 25 #define GLOBAL_SPARE    %g5                        23 #define GLOBAL_SPARE    %g5
 26 #endif                                             24 #endif
 27                                                    25 
 28 #ifndef STORE_ASI                                  26 #ifndef STORE_ASI
 29 #ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA            27 #ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
 30 #define STORE_ASI       ASI_BLK_INIT_QUAD_LDD_     28 #define STORE_ASI       ASI_BLK_INIT_QUAD_LDD_P
 31 #else                                              29 #else
 32 #define STORE_ASI       0x80            /* ASI     30 #define STORE_ASI       0x80            /* ASI_P */
 33 #endif                                             31 #endif
 34 #endif                                             32 #endif
 35                                                    33 
 36 #ifndef EX_LD                                      34 #ifndef EX_LD
 37 #define EX_LD(x,y)      x                      !!  35 #define EX_LD(x)        x
 38 #endif                                         << 
 39 #ifndef EX_LD_FP                               << 
 40 #define EX_LD_FP(x,y)   x                      << 
 41 #endif                                             36 #endif
 42                                                    37 
 43 #ifndef EX_ST                                      38 #ifndef EX_ST
 44 #define EX_ST(x,y)      x                      !!  39 #define EX_ST(x)        x
 45 #endif                                             40 #endif
 46 #ifndef EX_ST_FP                               !!  41 
 47 #define EX_ST_FP(x,y)   x                      !!  42 #ifndef EX_RETVAL
                                                   >>  43 #define EX_RETVAL(x)    x
 48 #endif                                             44 #endif
 49                                                    45 
 50 #ifndef LOAD                                       46 #ifndef LOAD
 51 #define LOAD(type,addr,dest)    type [addr], d     47 #define LOAD(type,addr,dest)    type [addr], dest
 52 #endif                                             48 #endif
 53                                                    49 
 54 #ifndef LOAD_BLK                                   50 #ifndef LOAD_BLK
 55 #define LOAD_BLK(addr,dest)     ldda [addr] AS     51 #define LOAD_BLK(addr,dest)     ldda [addr] ASI_BLK_P, dest
 56 #endif                                             52 #endif
 57                                                    53 
 58 #ifndef STORE                                      54 #ifndef STORE
 59 #ifndef MEMCPY_DEBUG                               55 #ifndef MEMCPY_DEBUG
 60 #define STORE(type,src,addr)    type src, [add     56 #define STORE(type,src,addr)    type src, [addr]
 61 #else                                              57 #else
 62 #define STORE(type,src,addr)    type##a src, [     58 #define STORE(type,src,addr)    type##a src, [addr] 0x80
 63 #endif                                             59 #endif
 64 #endif                                             60 #endif
 65                                                    61 
 66 #ifndef STORE_BLK                                  62 #ifndef STORE_BLK
 67 #define STORE_BLK(src,addr)     stda src, [add     63 #define STORE_BLK(src,addr)     stda src, [addr] ASI_BLK_P
 68 #endif                                             64 #endif
 69                                                    65 
 70 #ifndef STORE_INIT                                 66 #ifndef STORE_INIT
 71 #define STORE_INIT(src,addr)    stxa src, [add     67 #define STORE_INIT(src,addr)    stxa src, [addr] STORE_ASI
 72 #endif                                             68 #endif
 73                                                    69 
 74 #ifndef FUNC_NAME                                  70 #ifndef FUNC_NAME
 75 #define FUNC_NAME       NG2memcpy                  71 #define FUNC_NAME       NG2memcpy
 76 #endif                                             72 #endif
 77                                                    73 
 78 #ifndef PREAMBLE                                   74 #ifndef PREAMBLE
 79 #define PREAMBLE                                   75 #define PREAMBLE
 80 #endif                                             76 #endif
 81                                                    77 
 82 #ifndef XCC                                        78 #ifndef XCC
 83 #define XCC xcc                                    79 #define XCC xcc
 84 #endif                                             80 #endif
 85                                                    81 
 86 #define FREG_FROB(x0, x1, x2, x3, x4, x5, x6,      82 #define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
 87         faligndata      %x0, %x1, %f0; \           83         faligndata      %x0, %x1, %f0; \
 88         faligndata      %x1, %x2, %f2; \           84         faligndata      %x1, %x2, %f2; \
 89         faligndata      %x2, %x3, %f4; \           85         faligndata      %x2, %x3, %f4; \
 90         faligndata      %x3, %x4, %f6; \           86         faligndata      %x3, %x4, %f6; \
 91         faligndata      %x4, %x5, %f8; \           87         faligndata      %x4, %x5, %f8; \
 92         faligndata      %x5, %x6, %f10; \          88         faligndata      %x5, %x6, %f10; \
 93         faligndata      %x6, %x7, %f12; \          89         faligndata      %x6, %x7, %f12; \
 94         faligndata      %x7, %x8, %f14;            90         faligndata      %x7, %x8, %f14;
 95                                                    91 
 96 #define FREG_MOVE_1(x0) \                          92 #define FREG_MOVE_1(x0) \
 97         fsrc2           %x0, %f0;              !!  93         fmovd           %x0, %f0;
 98 #define FREG_MOVE_2(x0, x1) \                      94 #define FREG_MOVE_2(x0, x1) \
 99         fsrc2           %x0, %f0; \            !!  95         fmovd           %x0, %f0; \
100         fsrc2           %x1, %f2;              !!  96         fmovd           %x1, %f2;
101 #define FREG_MOVE_3(x0, x1, x2) \                  97 #define FREG_MOVE_3(x0, x1, x2) \
102         fsrc2           %x0, %f0; \            !!  98         fmovd           %x0, %f0; \
103         fsrc2           %x1, %f2; \            !!  99         fmovd           %x1, %f2; \
104         fsrc2           %x2, %f4;              !! 100         fmovd           %x2, %f4;
105 #define FREG_MOVE_4(x0, x1, x2, x3) \             101 #define FREG_MOVE_4(x0, x1, x2, x3) \
106         fsrc2           %x0, %f0; \            !! 102         fmovd           %x0, %f0; \
107         fsrc2           %x1, %f2; \            !! 103         fmovd           %x1, %f2; \
108         fsrc2           %x2, %f4; \            !! 104         fmovd           %x2, %f4; \
109         fsrc2           %x3, %f6;              !! 105         fmovd           %x3, %f6;
110 #define FREG_MOVE_5(x0, x1, x2, x3, x4) \         106 #define FREG_MOVE_5(x0, x1, x2, x3, x4) \
111         fsrc2           %x0, %f0; \            !! 107         fmovd           %x0, %f0; \
112         fsrc2           %x1, %f2; \            !! 108         fmovd           %x1, %f2; \
113         fsrc2           %x2, %f4; \            !! 109         fmovd           %x2, %f4; \
114         fsrc2           %x3, %f6; \            !! 110         fmovd           %x3, %f6; \
115         fsrc2           %x4, %f8;              !! 111         fmovd           %x4, %f8;
116 #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \     112 #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
117         fsrc2           %x0, %f0; \            !! 113         fmovd           %x0, %f0; \
118         fsrc2           %x1, %f2; \            !! 114         fmovd           %x1, %f2; \
119         fsrc2           %x2, %f4; \            !! 115         fmovd           %x2, %f4; \
120         fsrc2           %x3, %f6; \            !! 116         fmovd           %x3, %f6; \
121         fsrc2           %x4, %f8; \            !! 117         fmovd           %x4, %f8; \
122         fsrc2           %x5, %f10;             !! 118         fmovd           %x5, %f10;
123 #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6    119 #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
124         fsrc2           %x0, %f0; \            !! 120         fmovd           %x0, %f0; \
125         fsrc2           %x1, %f2; \            !! 121         fmovd           %x1, %f2; \
126         fsrc2           %x2, %f4; \            !! 122         fmovd           %x2, %f4; \
127         fsrc2           %x3, %f6; \            !! 123         fmovd           %x3, %f6; \
128         fsrc2           %x4, %f8; \            !! 124         fmovd           %x4, %f8; \
129         fsrc2           %x5, %f10; \           !! 125         fmovd           %x5, %f10; \
130         fsrc2           %x6, %f12;             !! 126         fmovd           %x6, %f12;
131 #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6    127 #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
132         fsrc2           %x0, %f0; \            !! 128         fmovd           %x0, %f0; \
133         fsrc2           %x1, %f2; \            !! 129         fmovd           %x1, %f2; \
134         fsrc2           %x2, %f4; \            !! 130         fmovd           %x2, %f4; \
135         fsrc2           %x3, %f6; \            !! 131         fmovd           %x3, %f6; \
136         fsrc2           %x4, %f8; \            !! 132         fmovd           %x4, %f8; \
137         fsrc2           %x5, %f10; \           !! 133         fmovd           %x5, %f10; \
138         fsrc2           %x6, %f12; \           !! 134         fmovd           %x6, %f12; \
139         fsrc2           %x7, %f14;             !! 135         fmovd           %x7, %f14;
140 #define FREG_LOAD_1(base, x0) \                   136 #define FREG_LOAD_1(base, x0) \
141         EX_LD_FP(LOAD(ldd, base + 0x00, %x0),  !! 137         EX_LD(LOAD(ldd, base + 0x00, %x0))
142 #define FREG_LOAD_2(base, x0, x1) \               138 #define FREG_LOAD_2(base, x0, x1) \
143         EX_LD_FP(LOAD(ldd, base + 0x00, %x0),  !! 139         EX_LD(LOAD(ldd, base + 0x00, %x0)); \
144         EX_LD_FP(LOAD(ldd, base + 0x08, %x1),  !! 140         EX_LD(LOAD(ldd, base + 0x08, %x1));
145 #define FREG_LOAD_3(base, x0, x1, x2) \           141 #define FREG_LOAD_3(base, x0, x1, x2) \
146         EX_LD_FP(LOAD(ldd, base + 0x00, %x0),  !! 142         EX_LD(LOAD(ldd, base + 0x00, %x0)); \
147         EX_LD_FP(LOAD(ldd, base + 0x08, %x1),  !! 143         EX_LD(LOAD(ldd, base + 0x08, %x1)); \
148         EX_LD_FP(LOAD(ldd, base + 0x10, %x2),  !! 144         EX_LD(LOAD(ldd, base + 0x10, %x2));
149 #define FREG_LOAD_4(base, x0, x1, x2, x3) \       145 #define FREG_LOAD_4(base, x0, x1, x2, x3) \
150         EX_LD_FP(LOAD(ldd, base + 0x00, %x0),  !! 146         EX_LD(LOAD(ldd, base + 0x00, %x0)); \
151         EX_LD_FP(LOAD(ldd, base + 0x08, %x1),  !! 147         EX_LD(LOAD(ldd, base + 0x08, %x1)); \
152         EX_LD_FP(LOAD(ldd, base + 0x10, %x2),  !! 148         EX_LD(LOAD(ldd, base + 0x10, %x2)); \
153         EX_LD_FP(LOAD(ldd, base + 0x18, %x3),  !! 149         EX_LD(LOAD(ldd, base + 0x18, %x3));
154 #define FREG_LOAD_5(base, x0, x1, x2, x3, x4)     150 #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
155         EX_LD_FP(LOAD(ldd, base + 0x00, %x0),  !! 151         EX_LD(LOAD(ldd, base + 0x00, %x0)); \
156         EX_LD_FP(LOAD(ldd, base + 0x08, %x1),  !! 152         EX_LD(LOAD(ldd, base + 0x08, %x1)); \
157         EX_LD_FP(LOAD(ldd, base + 0x10, %x2),  !! 153         EX_LD(LOAD(ldd, base + 0x10, %x2)); \
158         EX_LD_FP(LOAD(ldd, base + 0x18, %x3),  !! 154         EX_LD(LOAD(ldd, base + 0x18, %x3)); \
159         EX_LD_FP(LOAD(ldd, base + 0x20, %x4),  !! 155         EX_LD(LOAD(ldd, base + 0x20, %x4));
160 #define FREG_LOAD_6(base, x0, x1, x2, x3, x4,     156 #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
161         EX_LD_FP(LOAD(ldd, base + 0x00, %x0),  !! 157         EX_LD(LOAD(ldd, base + 0x00, %x0)); \
162         EX_LD_FP(LOAD(ldd, base + 0x08, %x1),  !! 158         EX_LD(LOAD(ldd, base + 0x08, %x1)); \
163         EX_LD_FP(LOAD(ldd, base + 0x10, %x2),  !! 159         EX_LD(LOAD(ldd, base + 0x10, %x2)); \
164         EX_LD_FP(LOAD(ldd, base + 0x18, %x3),  !! 160         EX_LD(LOAD(ldd, base + 0x18, %x3)); \
165         EX_LD_FP(LOAD(ldd, base + 0x20, %x4),  !! 161         EX_LD(LOAD(ldd, base + 0x20, %x4)); \
166         EX_LD_FP(LOAD(ldd, base + 0x28, %x5),  !! 162         EX_LD(LOAD(ldd, base + 0x28, %x5));
167 #define FREG_LOAD_7(base, x0, x1, x2, x3, x4,     163 #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
168         EX_LD_FP(LOAD(ldd, base + 0x00, %x0),  !! 164         EX_LD(LOAD(ldd, base + 0x00, %x0)); \
169         EX_LD_FP(LOAD(ldd, base + 0x08, %x1),  !! 165         EX_LD(LOAD(ldd, base + 0x08, %x1)); \
170         EX_LD_FP(LOAD(ldd, base + 0x10, %x2),  !! 166         EX_LD(LOAD(ldd, base + 0x10, %x2)); \
171         EX_LD_FP(LOAD(ldd, base + 0x18, %x3),  !! 167         EX_LD(LOAD(ldd, base + 0x18, %x3)); \
172         EX_LD_FP(LOAD(ldd, base + 0x20, %x4),  !! 168         EX_LD(LOAD(ldd, base + 0x20, %x4)); \
173         EX_LD_FP(LOAD(ldd, base + 0x28, %x5),  !! 169         EX_LD(LOAD(ldd, base + 0x28, %x5)); \
174         EX_LD_FP(LOAD(ldd, base + 0x30, %x6),  !! 170         EX_LD(LOAD(ldd, base + 0x30, %x6));
175                                                   171 
176         .register       %g2,#scratch              172         .register       %g2,#scratch
177         .register       %g3,#scratch              173         .register       %g3,#scratch
178                                                   174 
179         .text                                     175         .text
180 #ifndef EX_RETVAL                              << 
181 #define EX_RETVAL(x)    x                      << 
182 __restore_fp:                                  << 
183         VISExitHalf                            << 
184 __restore_asi:                                 << 
185         retl                                   << 
186          wr     %g0, ASI_AIUS, %asi            << 
187 ENTRY(NG2_retl_o2)                             << 
188         ba,pt   %xcc, __restore_asi            << 
189          mov    %o2, %o0                       << 
190 ENDPROC(NG2_retl_o2)                           << 
191 ENTRY(NG2_retl_o2_plus_1)                      << 
192         ba,pt   %xcc, __restore_asi            << 
193          add    %o2, 1, %o0                    << 
194 ENDPROC(NG2_retl_o2_plus_1)                    << 
195 ENTRY(NG2_retl_o2_plus_4)                      << 
196         ba,pt   %xcc, __restore_asi            << 
197          add    %o2, 4, %o0                    << 
198 ENDPROC(NG2_retl_o2_plus_4)                    << 
199 ENTRY(NG2_retl_o2_plus_8)                      << 
200         ba,pt   %xcc, __restore_asi            << 
201          add    %o2, 8, %o0                    << 
202 ENDPROC(NG2_retl_o2_plus_8)                    << 
203 ENTRY(NG2_retl_o2_plus_o4_plus_1)              << 
204         add     %o4, 1, %o4                    << 
205         ba,pt   %xcc, __restore_asi            << 
206          add    %o2, %o4, %o0                  << 
207 ENDPROC(NG2_retl_o2_plus_o4_plus_1)            << 
208 ENTRY(NG2_retl_o2_plus_o4_plus_8)              << 
209         add     %o4, 8, %o4                    << 
210         ba,pt   %xcc, __restore_asi            << 
211          add    %o2, %o4, %o0                  << 
212 ENDPROC(NG2_retl_o2_plus_o4_plus_8)            << 
213 ENTRY(NG2_retl_o2_plus_o4_plus_16)             << 
214         add     %o4, 16, %o4                   << 
215         ba,pt   %xcc, __restore_asi            << 
216          add    %o2, %o4, %o0                  << 
217 ENDPROC(NG2_retl_o2_plus_o4_plus_16)           << 
218 ENTRY(NG2_retl_o2_plus_g1_fp)                  << 
219         ba,pt   %xcc, __restore_fp             << 
220          add    %o2, %g1, %o0                  << 
221 ENDPROC(NG2_retl_o2_plus_g1_fp)                << 
222 ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)          << 
223         add     %g1, 64, %g1                   << 
224         ba,pt   %xcc, __restore_fp             << 
225          add    %o2, %g1, %o0                  << 
226 ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)        << 
227 ENTRY(NG2_retl_o2_plus_g1_plus_1)              << 
228         add     %g1, 1, %g1                    << 
229         ba,pt   %xcc, __restore_asi            << 
230          add    %o2, %g1, %o0                  << 
231 ENDPROC(NG2_retl_o2_plus_g1_plus_1)            << 
232 ENTRY(NG2_retl_o2_and_7_plus_o4)               << 
233         and     %o2, 7, %o2                    << 
234         ba,pt   %xcc, __restore_asi            << 
235          add    %o2, %o4, %o0                  << 
236 ENDPROC(NG2_retl_o2_and_7_plus_o4)             << 
237 ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)        << 
238         and     %o2, 7, %o2                    << 
239         add     %o4, 8, %o4                    << 
240         ba,pt   %xcc, __restore_asi            << 
241          add    %o2, %o4, %o0                  << 
242 ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)      << 
243 #endif                                         << 
244                                                << 
245         .align          64                        176         .align          64
246                                                   177 
247         .globl  FUNC_NAME                         178         .globl  FUNC_NAME
248         .type   FUNC_NAME,#function               179         .type   FUNC_NAME,#function
249 FUNC_NAME:      /* %o0=dst, %o1=src, %o2=len *    180 FUNC_NAME:      /* %o0=dst, %o1=src, %o2=len */
250         srlx            %o2, 31, %g2              181         srlx            %o2, 31, %g2
251         cmp             %g2, 0                    182         cmp             %g2, 0
252         tne             %xcc, 5                   183         tne             %xcc, 5
253         PREAMBLE                                  184         PREAMBLE
254         mov             %o0, %o3               !! 185         mov             %o0, GLOBAL_SPARE
255         cmp             %o2, 0                    186         cmp             %o2, 0
256         be,pn           %XCC, 85f                 187         be,pn           %XCC, 85f
257          or             %o0, %o1, GLOBAL_SPARE !! 188          or             %o0, %o1, %o3
258         cmp             %o2, 16                   189         cmp             %o2, 16
259         blu,a,pn        %XCC, 80f                 190         blu,a,pn        %XCC, 80f
260          or             GLOBAL_SPARE, %o2, GLO !! 191          or             %o3, %o2, %o3
261                                                   192 
262         /* 2 blocks (128 bytes) is the minimum    193         /* 2 blocks (128 bytes) is the minimum we can do the block
263          * copy with.  We need to ensure that     194          * copy with.  We need to ensure that we'll iterate at least
264          * once in the block copy loop.  At wo    195          * once in the block copy loop.  At worst we'll need to align
265          * the destination to a 64-byte bounda    196          * the destination to a 64-byte boundary which can chew up
266          * to (64 - 1) bytes from the length b    197          * to (64 - 1) bytes from the length before we perform the
267          * block copy loop.                       198          * block copy loop.
268          *                                        199          *
269          * However, the cut-off point, perform    200          * However, the cut-off point, performance wise, is around
270          * 4 64-byte blocks.                      201          * 4 64-byte blocks.
271          */                                       202          */
272         cmp             %o2, (4 * 64)             203         cmp             %o2, (4 * 64)
273         blu,pt          %XCC, 75f                 204         blu,pt          %XCC, 75f
274          andcc          GLOBAL_SPARE, 0x7, %g0 !! 205          andcc          %o3, 0x7, %g0
275                                                   206 
276         /* %o0: dst                               207         /* %o0: dst
277          * %o1: src                               208          * %o1: src
278          * %o2: len  (known to be >= 128)         209          * %o2: len  (known to be >= 128)
279          *                                        210          *
280          * The block copy loops can use %o4, %    211          * The block copy loops can use %o4, %g2, %g3 as
281          * temporaries while copying the data.    212          * temporaries while copying the data.  %o5 must
282          * be preserved between VISEntryHalf a    213          * be preserved between VISEntryHalf and VISExitHalf
283          */                                       214          */
284                                                   215 
285         LOAD(prefetch, %o1 + 0x000, #one_read)    216         LOAD(prefetch, %o1 + 0x000, #one_read)
286         LOAD(prefetch, %o1 + 0x040, #one_read)    217         LOAD(prefetch, %o1 + 0x040, #one_read)
287         LOAD(prefetch, %o1 + 0x080, #one_read)    218         LOAD(prefetch, %o1 + 0x080, #one_read)
288                                                   219 
289         /* Align destination on 64-byte bounda    220         /* Align destination on 64-byte boundary.  */
290         andcc           %o0, (64 - 1), %o4        221         andcc           %o0, (64 - 1), %o4
291         be,pt           %XCC, 2f                  222         be,pt           %XCC, 2f
292          sub            %o4, 64, %o4              223          sub            %o4, 64, %o4
293         sub             %g0, %o4, %o4   ! byte    224         sub             %g0, %o4, %o4   ! bytes to align dst
294         sub             %o2, %o4, %o2             225         sub             %o2, %o4, %o2
295 1:      subcc           %o4, 1, %o4               226 1:      subcc           %o4, 1, %o4
296         EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o !! 227         EX_LD(LOAD(ldub, %o1, %g1))
297         EX_ST(STORE(stb, %g1, %o0), NG2_retl_o !! 228         EX_ST(STORE(stb, %g1, %o0))
298         add             %o1, 1, %o1               229         add             %o1, 1, %o1
299         bne,pt          %XCC, 1b                  230         bne,pt          %XCC, 1b
300         add             %o0, 1, %o0               231         add             %o0, 1, %o0
301                                                   232 
302 2:                                                233 2:
303         /* Clobbers o5/g1/g2/g3/g7/icc/xcc.  W    234         /* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
304          * o5 from here until we hit VISExitHa    235          * o5 from here until we hit VISExitHalf.
305          */                                       236          */
306         VISEntryHalf                              237         VISEntryHalf
307                                                   238 
308         membar          #Sync                  << 
309         alignaddr       %o1, %g0, %g0             239         alignaddr       %o1, %g0, %g0
310                                                   240 
311         add             %o1, (64 - 1), %o4        241         add             %o1, (64 - 1), %o4
312         andn            %o4, (64 - 1), %o4        242         andn            %o4, (64 - 1), %o4
313         andn            %o2, (64 - 1), %g1        243         andn            %o2, (64 - 1), %g1
314         sub             %o2, %g1, %o2             244         sub             %o2, %g1, %o2
315                                                   245 
316         and             %o1, (64 - 1), %g2        246         and             %o1, (64 - 1), %g2
317         add             %o1, %g1, %o1             247         add             %o1, %g1, %o1
318         sub             %o0, %o4, %g3             248         sub             %o0, %o4, %g3
319         brz,pt          %g2, 190f                 249         brz,pt          %g2, 190f
320          cmp            %g2, 32                   250          cmp            %g2, 32
321         blu,a           5f                        251         blu,a           5f
322          cmp            %g2, 16                   252          cmp            %g2, 16
323         cmp             %g2, 48                   253         cmp             %g2, 48
324         blu,a           4f                        254         blu,a           4f
325          cmp            %g2, 40                   255          cmp            %g2, 40
326         cmp             %g2, 56                   256         cmp             %g2, 56
327         blu             170f                      257         blu             170f
328          nop                                      258          nop
329         ba,a,pt         %xcc, 180f                259         ba,a,pt         %xcc, 180f
330          nop                                   << 
331                                                   260 
332 4:      /* 32 <= low bits < 48 */                 261 4:      /* 32 <= low bits < 48 */
333         blu             150f                      262         blu             150f
334          nop                                      263          nop
335         ba,a,pt         %xcc, 160f                264         ba,a,pt         %xcc, 160f
336          nop                                   << 
337 5:      /* 0 < low bits < 32 */                   265 5:      /* 0 < low bits < 32 */
338         blu,a           6f                        266         blu,a           6f
339          cmp            %g2, 8                    267          cmp            %g2, 8
340         cmp             %g2, 24                   268         cmp             %g2, 24
341         blu             130f                      269         blu             130f
342          nop                                      270          nop
343         ba,a,pt         %xcc, 140f                271         ba,a,pt         %xcc, 140f
344          nop                                   << 
345 6:      /* 0 < low bits < 16 */                   272 6:      /* 0 < low bits < 16 */
346         bgeu            120f                      273         bgeu            120f
347          nop                                      274          nop
348         /* fall through for 0 < low bits < 8 *    275         /* fall through for 0 < low bits < 8 */
349 110:    sub             %o4, 64, %g2              276 110:    sub             %o4, 64, %g2
350         EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_ !! 277         EX_LD(LOAD_BLK(%g2, %f0))
351 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 278 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
352         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 279         EX_LD(LOAD_BLK(%o4, %f16))
353         FREG_FROB(f0, f2, f4, f6, f8, f10, f12    280         FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
354         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 281         EX_ST(STORE_BLK(%f0, %o4 + %g3))
355         FREG_MOVE_8(f16, f18, f20, f22, f24, f    282         FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
356         subcc           %g1, 64, %g1              283         subcc           %g1, 64, %g1
357         add             %o4, 64, %o4              284         add             %o4, 64, %o4
358         bne,pt          %xcc, 1b                  285         bne,pt          %xcc, 1b
359          LOAD(prefetch, %o4 + 64, #one_read)      286          LOAD(prefetch, %o4 + 64, #one_read)
360         ba,pt           %xcc, 195f                287         ba,pt           %xcc, 195f
361          nop                                      288          nop
362                                                   289 
363 120:    sub             %o4, 56, %g2              290 120:    sub             %o4, 56, %g2
364         FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f    291         FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
365 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 292 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
366         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 293         EX_LD(LOAD_BLK(%o4, %f16))
367         FREG_FROB(f0, f2, f4, f6, f8, f10, f12    294         FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
368         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 295         EX_ST(STORE_BLK(%f0, %o4 + %g3))
369         FREG_MOVE_7(f18, f20, f22, f24, f26, f    296         FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
370         subcc           %g1, 64, %g1              297         subcc           %g1, 64, %g1
371         add             %o4, 64, %o4              298         add             %o4, 64, %o4
372         bne,pt          %xcc, 1b                  299         bne,pt          %xcc, 1b
373          LOAD(prefetch, %o4 + 64, #one_read)      300          LOAD(prefetch, %o4 + 64, #one_read)
374         ba,pt           %xcc, 195f                301         ba,pt           %xcc, 195f
375          nop                                      302          nop
376                                                   303 
377 130:    sub             %o4, 48, %g2              304 130:    sub             %o4, 48, %g2
378         FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f    305         FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
379 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 306 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
380         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 307         EX_LD(LOAD_BLK(%o4, %f16))
381         FREG_FROB(f0, f2, f4, f6, f8, f10, f16    308         FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
382         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 309         EX_ST(STORE_BLK(%f0, %o4 + %g3))
383         FREG_MOVE_6(f20, f22, f24, f26, f28, f    310         FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
384         subcc           %g1, 64, %g1              311         subcc           %g1, 64, %g1
385         add             %o4, 64, %o4              312         add             %o4, 64, %o4
386         bne,pt          %xcc, 1b                  313         bne,pt          %xcc, 1b
387          LOAD(prefetch, %o4 + 64, #one_read)      314          LOAD(prefetch, %o4 + 64, #one_read)
388         ba,pt           %xcc, 195f                315         ba,pt           %xcc, 195f
389          nop                                      316          nop
390                                                   317 
391 140:    sub             %o4, 40, %g2              318 140:    sub             %o4, 40, %g2
392         FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)      319         FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
393 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 320 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
394         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 321         EX_LD(LOAD_BLK(%o4, %f16))
395         FREG_FROB(f0, f2, f4, f6, f8, f16, f18    322         FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
396         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 323         EX_ST(STORE_BLK(%f0, %o4 + %g3))
397         FREG_MOVE_5(f22, f24, f26, f28, f30)      324         FREG_MOVE_5(f22, f24, f26, f28, f30)
398         subcc           %g1, 64, %g1              325         subcc           %g1, 64, %g1
399         add             %o4, 64, %o4              326         add             %o4, 64, %o4
400         bne,pt          %xcc, 1b                  327         bne,pt          %xcc, 1b
401          LOAD(prefetch, %o4 + 64, #one_read)      328          LOAD(prefetch, %o4 + 64, #one_read)
402         ba,pt           %xcc, 195f                329         ba,pt           %xcc, 195f
403          nop                                      330          nop
404                                                   331 
405 150:    sub             %o4, 32, %g2              332 150:    sub             %o4, 32, %g2
406         FREG_LOAD_4(%g2, f0, f2, f4, f6)          333         FREG_LOAD_4(%g2, f0, f2, f4, f6)
407 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 334 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
408         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 335         EX_LD(LOAD_BLK(%o4, %f16))
409         FREG_FROB(f0, f2, f4, f6, f16, f18, f2    336         FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
410         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 337         EX_ST(STORE_BLK(%f0, %o4 + %g3))
411         FREG_MOVE_4(f24, f26, f28, f30)           338         FREG_MOVE_4(f24, f26, f28, f30)
412         subcc           %g1, 64, %g1              339         subcc           %g1, 64, %g1
413         add             %o4, 64, %o4              340         add             %o4, 64, %o4
414         bne,pt          %xcc, 1b                  341         bne,pt          %xcc, 1b
415          LOAD(prefetch, %o4 + 64, #one_read)      342          LOAD(prefetch, %o4 + 64, #one_read)
416         ba,pt           %xcc, 195f                343         ba,pt           %xcc, 195f
417          nop                                      344          nop
418                                                   345 
419 160:    sub             %o4, 24, %g2              346 160:    sub             %o4, 24, %g2
420         FREG_LOAD_3(%g2, f0, f2, f4)              347         FREG_LOAD_3(%g2, f0, f2, f4)
421 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 348 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
422         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 349         EX_LD(LOAD_BLK(%o4, %f16))
423         FREG_FROB(f0, f2, f4, f16, f18, f20, f    350         FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
424         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 351         EX_ST(STORE_BLK(%f0, %o4 + %g3))
425         FREG_MOVE_3(f26, f28, f30)                352         FREG_MOVE_3(f26, f28, f30)
426         subcc           %g1, 64, %g1              353         subcc           %g1, 64, %g1
427         add             %o4, 64, %o4              354         add             %o4, 64, %o4
428         bne,pt          %xcc, 1b                  355         bne,pt          %xcc, 1b
429          LOAD(prefetch, %o4 + 64, #one_read)      356          LOAD(prefetch, %o4 + 64, #one_read)
430         ba,pt           %xcc, 195f                357         ba,pt           %xcc, 195f
431          nop                                      358          nop
432                                                   359 
433 170:    sub             %o4, 16, %g2              360 170:    sub             %o4, 16, %g2
434         FREG_LOAD_2(%g2, f0, f2)                  361         FREG_LOAD_2(%g2, f0, f2)
435 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 362 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
436         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 363         EX_LD(LOAD_BLK(%o4, %f16))
437         FREG_FROB(f0, f2, f16, f18, f20, f22,     364         FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
438         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 365         EX_ST(STORE_BLK(%f0, %o4 + %g3))
439         FREG_MOVE_2(f28, f30)                     366         FREG_MOVE_2(f28, f30)
440         subcc           %g1, 64, %g1              367         subcc           %g1, 64, %g1
441         add             %o4, 64, %o4              368         add             %o4, 64, %o4
442         bne,pt          %xcc, 1b                  369         bne,pt          %xcc, 1b
443          LOAD(prefetch, %o4 + 64, #one_read)      370          LOAD(prefetch, %o4 + 64, #one_read)
444         ba,pt           %xcc, 195f                371         ba,pt           %xcc, 195f
445          nop                                      372          nop
446                                                   373 
447 180:    sub             %o4, 8, %g2               374 180:    sub             %o4, 8, %g2
448         FREG_LOAD_1(%g2, f0)                      375         FREG_LOAD_1(%g2, f0)
449 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 376 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
450         EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl !! 377         EX_LD(LOAD_BLK(%o4, %f16))
451         FREG_FROB(f0, f16, f18, f20, f22, f24,    378         FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
452         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 379         EX_ST(STORE_BLK(%f0, %o4 + %g3))
453         FREG_MOVE_1(f30)                          380         FREG_MOVE_1(f30)
454         subcc           %g1, 64, %g1              381         subcc           %g1, 64, %g1
455         add             %o4, 64, %o4              382         add             %o4, 64, %o4
456         bne,pt          %xcc, 1b                  383         bne,pt          %xcc, 1b
457          LOAD(prefetch, %o4 + 64, #one_read)      384          LOAD(prefetch, %o4 + 64, #one_read)
458         ba,pt           %xcc, 195f                385         ba,pt           %xcc, 195f
459          nop                                      386          nop
460                                                   387 
461 190:                                              388 190:
462 1:      EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), N !! 389 1:      EX_ST(STORE_INIT(%g0, %o4 + %g3))
463         subcc           %g1, 64, %g1              390         subcc           %g1, 64, %g1
464         EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_ !! 391         EX_LD(LOAD_BLK(%o4, %f0))
465         EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG !! 392         EX_ST(STORE_BLK(%f0, %o4 + %g3))
466         add             %o4, 64, %o4              393         add             %o4, 64, %o4
467         bne,pt          %xcc, 1b                  394         bne,pt          %xcc, 1b
468          LOAD(prefetch, %o4 + 64, #one_read)      395          LOAD(prefetch, %o4 + 64, #one_read)
469                                                   396 
470 195:                                              397 195:
471         add             %o4, %g3, %o0             398         add             %o4, %g3, %o0
472         membar          #Sync                     399         membar          #Sync
473                                                   400 
474         VISExitHalf                               401         VISExitHalf
475                                                   402 
476         /* %o2 contains any final bytes still     403         /* %o2 contains any final bytes still needed to be copied
477          * over. If anything is left, we copy     404          * over. If anything is left, we copy it one byte at a time.
478          */                                       405          */
479         brz,pt          %o2, 85f                  406         brz,pt          %o2, 85f
480          sub            %o0, %o1, GLOBAL_SPARE !! 407          sub            %o0, %o1, %o3
481         ba,a,pt         %XCC, 90f                 408         ba,a,pt         %XCC, 90f
482          nop                                   << 
483                                                   409 
484         .align          64                        410         .align          64
485 75: /* 16 < len <= 64 */                          411 75: /* 16 < len <= 64 */
486         bne,pn          %XCC, 75f                 412         bne,pn          %XCC, 75f
487          sub            %o0, %o1, GLOBAL_SPARE !! 413          sub            %o0, %o1, %o3
488                                                   414 
489 72:                                               415 72:
490         andn            %o2, 0xf, %o4             416         andn            %o2, 0xf, %o4
491         and             %o2, 0xf, %o2             417         and             %o2, 0xf, %o2
492 1:      subcc           %o4, 0x10, %o4            418 1:      subcc           %o4, 0x10, %o4
493         EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2 !! 419         EX_LD(LOAD(ldx, %o1, %o5))
494         add             %o1, 0x08, %o1            420         add             %o1, 0x08, %o1
495         EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2 !! 421         EX_LD(LOAD(ldx, %o1, %g1))
496         sub             %o1, 0x08, %o1            422         sub             %o1, 0x08, %o1
497         EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPA !! 423         EX_ST(STORE(stx, %o5, %o1 + %o3))
498         add             %o1, 0x8, %o1             424         add             %o1, 0x8, %o1
499         EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPA !! 425         EX_ST(STORE(stx, %g1, %o1 + %o3))
500         bgu,pt          %XCC, 1b                  426         bgu,pt          %XCC, 1b
501          add            %o1, 0x8, %o1             427          add            %o1, 0x8, %o1
502 73:     andcc           %o2, 0x8, %g0             428 73:     andcc           %o2, 0x8, %g0
503         be,pt           %XCC, 1f                  429         be,pt           %XCC, 1f
504          nop                                      430          nop
505         sub             %o2, 0x8, %o2             431         sub             %o2, 0x8, %o2
506         EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2 !! 432         EX_LD(LOAD(ldx, %o1, %o5))
507         EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPA !! 433         EX_ST(STORE(stx, %o5, %o1 + %o3))
508         add             %o1, 0x8, %o1             434         add             %o1, 0x8, %o1
509 1:      andcc           %o2, 0x4, %g0             435 1:      andcc           %o2, 0x4, %g0
510         be,pt           %XCC, 1f                  436         be,pt           %XCC, 1f
511          nop                                      437          nop
512         sub             %o2, 0x4, %o2             438         sub             %o2, 0x4, %o2
513         EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o !! 439         EX_LD(LOAD(lduw, %o1, %o5))
514         EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPA !! 440         EX_ST(STORE(stw, %o5, %o1 + %o3))
515         add             %o1, 0x4, %o1             441         add             %o1, 0x4, %o1
516 1:      cmp             %o2, 0                    442 1:      cmp             %o2, 0
517         be,pt           %XCC, 85f                 443         be,pt           %XCC, 85f
518          nop                                      444          nop
519         ba,pt           %xcc, 90f                 445         ba,pt           %xcc, 90f
520          nop                                      446          nop
521                                                   447 
522 75:                                               448 75:
523         andcc           %o0, 0x7, %g1             449         andcc           %o0, 0x7, %g1
524         sub             %g1, 0x8, %g1             450         sub             %g1, 0x8, %g1
525         be,pn           %icc, 2f                  451         be,pn           %icc, 2f
526          sub            %g0, %g1, %g1             452          sub            %g0, %g1, %g1
527         sub             %o2, %g1, %o2             453         sub             %o2, %g1, %o2
528                                                   454 
529 1:      subcc           %g1, 1, %g1               455 1:      subcc           %g1, 1, %g1
530         EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o !! 456         EX_LD(LOAD(ldub, %o1, %o5))
531         EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPA !! 457         EX_ST(STORE(stb, %o5, %o1 + %o3))
532         bgu,pt          %icc, 1b                  458         bgu,pt          %icc, 1b
533          add            %o1, 1, %o1               459          add            %o1, 1, %o1
534                                                   460 
535 2:      add             %o1, GLOBAL_SPARE, %o0 !! 461 2:      add             %o1, %o3, %o0
536         andcc           %o1, 0x7, %g1             462         andcc           %o1, 0x7, %g1
537         bne,pt          %icc, 8f                  463         bne,pt          %icc, 8f
538          sll            %g1, 3, %g1               464          sll            %g1, 3, %g1
539                                                   465 
540         cmp             %o2, 16                   466         cmp             %o2, 16
541         bgeu,pt         %icc, 72b                 467         bgeu,pt         %icc, 72b
542          nop                                      468          nop
543         ba,a,pt         %xcc, 73b                 469         ba,a,pt         %xcc, 73b
544                                                   470 
545 8:      mov             64, GLOBAL_SPARE       !! 471 8:      mov             64, %o3
546         andn            %o1, 0x7, %o1             472         andn            %o1, 0x7, %o1
547         EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2 !! 473         EX_LD(LOAD(ldx, %o1, %g2))
548         sub             GLOBAL_SPARE, %g1, GLO !! 474         sub             %o3, %g1, %o3
549         andn            %o2, 0x7, %o4             475         andn            %o2, 0x7, %o4
550         sllx            %g2, %g1, %g2             476         sllx            %g2, %g1, %g2
551 1:      add             %o1, 0x8, %o1             477 1:      add             %o1, 0x8, %o1
552         EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2 !! 478         EX_LD(LOAD(ldx, %o1, %g3))
553         subcc           %o4, 0x8, %o4             479         subcc           %o4, 0x8, %o4
554         srlx            %g3, GLOBAL_SPARE, %o5 !! 480         srlx            %g3, %o3, %o5
555         or              %o5, %g2, %o5             481         or              %o5, %g2, %o5
556         EX_ST(STORE(stx, %o5, %o0), NG2_retl_o !! 482         EX_ST(STORE(stx, %o5, %o0))
557         add             %o0, 0x8, %o0             483         add             %o0, 0x8, %o0
558         bgu,pt          %icc, 1b                  484         bgu,pt          %icc, 1b
559          sllx           %g3, %g1, %g2             485          sllx           %g3, %g1, %g2
560                                                   486 
561         srl             %g1, 3, %g1               487         srl             %g1, 3, %g1
562         andcc           %o2, 0x7, %o2             488         andcc           %o2, 0x7, %o2
563         be,pn           %icc, 85f                 489         be,pn           %icc, 85f
564          add            %o1, %g1, %o1             490          add            %o1, %g1, %o1
565         ba,pt           %xcc, 90f                 491         ba,pt           %xcc, 90f
566          sub            %o0, %o1, GLOBAL_SPARE !! 492          sub            %o0, %o1, %o3
567                                                   493 
568         .align          64                        494         .align          64
569 80: /* 0 < len <= 16 */                           495 80: /* 0 < len <= 16 */
570         andcc           GLOBAL_SPARE, 0x3, %g0 !! 496         andcc           %o3, 0x3, %g0
571         bne,pn          %XCC, 90f                 497         bne,pn          %XCC, 90f
572          sub            %o0, %o1, GLOBAL_SPARE !! 498          sub            %o0, %o1, %o3
573                                                   499 
574 1:                                                500 1:
575         subcc           %o2, 4, %o2               501         subcc           %o2, 4, %o2
576         EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o !! 502         EX_LD(LOAD(lduw, %o1, %g1))
577         EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPA !! 503         EX_ST(STORE(stw, %g1, %o1 + %o3))
578         bgu,pt          %XCC, 1b                  504         bgu,pt          %XCC, 1b
579          add            %o1, 4, %o1               505          add            %o1, 4, %o1
580                                                   506 
581 85:     retl                                      507 85:     retl
582          mov            EX_RETVAL(%o3), %o0    !! 508          mov            EX_RETVAL(GLOBAL_SPARE), %o0
583                                                   509 
584         .align          32                        510         .align          32
585 90:                                               511 90:
586         subcc           %o2, 1, %o2               512         subcc           %o2, 1, %o2
587         EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o !! 513         EX_LD(LOAD(ldub, %o1, %g1))
588         EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPA !! 514         EX_ST(STORE(stb, %g1, %o1 + %o3))
589         bgu,pt          %XCC, 90b                 515         bgu,pt          %XCC, 90b
590          add            %o1, 1, %o1               516          add            %o1, 1, %o1
591         retl                                      517         retl
592          mov            EX_RETVAL(%o3), %o0    !! 518          mov            EX_RETVAL(GLOBAL_SPARE), %o0
593                                                   519 
594         .size           FUNC_NAME, .-FUNC_NAME    520         .size           FUNC_NAME, .-FUNC_NAME
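
The main change visible in this diff is the exception-handling rework: the old
single-argument EX_LD(x)/EX_ST(x) wrappers become two-argument EX_LD(x, y) and
EX_ST(x, y) (plus _FP variants for accesses made while the FPU is live), and the
file gains the NG2_retl_* stubs, each of which computes the number of bytes left
to copy before returning through __restore_asi or __restore_fp. Within this file
the default EX_LD(x,y) still expands to just x; the user-copy wrapper files
(presumably NG2copy_from_user.S / NG2copy_to_user.S, which are not part of this
diff) are the ones expected to redefine it so that every wrapped access gets an
exception-table entry pointing at the named stub. A minimal sketch of that
assumed definition, modeled on the sparc user-copy wrappers rather than copied
from this tree:

	/* Assumed wrapper: perform the access at a local label and record a
	 * __ex_table entry that redirects a faulting access to the handler
	 * named in the second argument.
	 */
	#define EX_LD(x, y)			\
	98:	x;				\
		.section __ex_table,"a";	\
		.align 4;			\
		.word 98b, y;			\
		.text;				\
		.align 4;

With such a definition, wrapping a byte load as
EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1) makes a fault land in
NG2_retl_o2_plus_1, which returns %o2 + 1 in %o0, i.e. the number of bytes the
caller still has to deal with.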