~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/sparc/lib/memcpy.S

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /arch/sparc/lib/memcpy.S (Architecture ppc) and /arch/mips/lib/memcpy.S (Architecture mips)


  1 /* SPDX-License-Identifier: GPL-2.0 */         !!   1 /*
  2 /* memcpy.S: Sparc optimized memcpy and memmov !!   2  * This file is subject to the terms and conditions of the GNU General Public
  3  * Hand optimized from GNU libc's memcpy and m !!   3  * License.  See the file "COPYING" in the main directory of this archive
  4  * Copyright (C) 1991,1996 Free Software Found !!   4  * for more details.
  5  * Copyright (C) 1995 Linus Torvalds (Linus.To !!   5  *
  6  * Copyright (C) 1996 David S. Miller (davem@c !!   6  * Unified implementation of memcpy, memmove and the __copy_user backend.
  7  * Copyright (C) 1996 Eddie C. Dost (ecd@skyne !!   7  *
  8  * Copyright (C) 1996 Jakub Jelinek (jj@sunsit !!   8  * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
                                                   >>   9  * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
                                                   >>  10  * Copyright (C) 2002 Broadcom, Inc.
                                                   >>  11  *   memcpy/copy_user author: Mark Vandevoorde
                                                   >>  12  * Copyright (C) 2007  Maciej W. Rozycki
                                                   >>  13  * Copyright (C) 2014 Imagination Technologies Ltd.
                                                   >>  14  *
                                                   >>  15  * Mnemonic names for arguments to memcpy/__copy_user
  9  */                                                16  */
 10                                                    17 
                                                   >>  18 /*
                                                   >>  19  * Hack to resolve longstanding prefetch issue
                                                   >>  20  *
                                                   >>  21  * Prefetching may be fatal on some systems if we're prefetching beyond the
                                                   >>  22  * end of memory.  It's also a seriously bad idea on non
                                                   >>  23  * dma-coherent systems.
                                                   >>  24  */
                                                   >>  25 #ifdef CONFIG_DMA_NONCOHERENT
                                                   >>  26 #undef CONFIG_CPU_HAS_PREFETCH
                                                   >>  27 #endif
                                                   >>  28 #ifdef CONFIG_MIPS_MALTA
                                                   >>  29 #undef CONFIG_CPU_HAS_PREFETCH
                                                   >>  30 #endif
                                                   >>  31 #ifdef CONFIG_CPU_MIPSR6
                                                   >>  32 #undef CONFIG_CPU_HAS_PREFETCH
                                                   >>  33 #endif
                                                   >>  34 
 11 #include <linux/export.h>                          35 #include <linux/export.h>
                                                   >>  36 #include <asm/asm.h>
                                                   >>  37 #include <asm/asm-offsets.h>
                                                   >>  38 #include <asm/regdef.h>
                                                   >>  39 
                                                   >>  40 #define dst a0
                                                   >>  41 #define src a1
                                                   >>  42 #define len a2
                                                   >>  43 
                                                   >>  44 /*
                                                   >>  45  * Spec
                                                   >>  46  *
                                                   >>  47  * memcpy copies len bytes from src to dst and sets v0 to dst.
                                                   >>  48  * It assumes that
                                                   >>  49  *   - src and dst don't overlap
                                                   >>  50  *   - src is readable
                                                   >>  51  *   - dst is writable
                                                   >>  52  * memcpy uses the standard calling convention
                                                   >>  53  *
                                                   >>  54  * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
                                                   >>  55  * the number of uncopied bytes due to an exception caused by a read or write.
                                                   >>  56  * __copy_user assumes that src and dst don't overlap, and that the call is
                                                   >>  57  * implementing one of the following:
                                                   >>  58  *   copy_to_user
                                                   >>  59  *     - src is readable  (no exceptions when reading src)
                                                   >>  60  *   copy_from_user
                                                   >>  61  *     - dst is writable  (no exceptions when writing dst)
                                                   >>  62  * __copy_user uses a non-standard calling convention; see
                                                   >>  63  * include/asm-mips/uaccess.h
                                                   >>  64  *
                                                   >>  65  * When an exception happens on a load, the handler must
                                                   >>  66  * ensure that all of the destination buffer is overwritten to prevent
                                                   >>  67  * leaking information to user mode programs.
                                                   >>  68  */
 12                                                    69 
 13 #define FUNC(x)                 \              !!  70 /*
 14         .globl  x;              \              !!  71  * Implementation
 15         .type   x,@function;    \              !!  72  */
 16         .align  4;              \              << 
 17 x:                                             << 
 18                                                << 
 19 /* Both these macros have to start with exactl << 
 20 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1 << 
 21         ldd     [%src + (offset) + 0x00], %t0; << 
 22         ldd     [%src + (offset) + 0x08], %t2; << 
 23         ldd     [%src + (offset) + 0x10], %t4; << 
 24         ldd     [%src + (offset) + 0x18], %t6; << 
 25         st      %t0, [%dst + (offset) + 0x00]; << 
 26         st      %t1, [%dst + (offset) + 0x04]; << 
 27         st      %t2, [%dst + (offset) + 0x08]; << 
 28         st      %t3, [%dst + (offset) + 0x0c]; << 
 29         st      %t4, [%dst + (offset) + 0x10]; << 
 30         st      %t5, [%dst + (offset) + 0x14]; << 
 31         st      %t6, [%dst + (offset) + 0x18]; << 
 32         st      %t7, [%dst + (offset) + 0x1c]; << 
 33                                                << 
 34 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t << 
 35         ldd     [%src + (offset) + 0x00], %t0; << 
 36         ldd     [%src + (offset) + 0x08], %t2; << 
 37         ldd     [%src + (offset) + 0x10], %t4; << 
 38         ldd     [%src + (offset) + 0x18], %t6; << 
 39         std     %t0, [%dst + (offset) + 0x00]; << 
 40         std     %t2, [%dst + (offset) + 0x08]; << 
 41         std     %t4, [%dst + (offset) + 0x10]; << 
 42         std     %t6, [%dst + (offset) + 0x18]; << 
 43                                                << 
 44 #define MOVE_LASTCHUNK(src, dst, offset, t0, t << 
 45         ldd     [%src - (offset) - 0x10], %t0; << 
 46         ldd     [%src - (offset) - 0x08], %t2; << 
 47         st      %t0, [%dst - (offset) - 0x10]; << 
 48         st      %t1, [%dst - (offset) - 0x0c]; << 
 49         st      %t2, [%dst - (offset) - 0x08]; << 
 50         st      %t3, [%dst - (offset) - 0x04]; << 
 51                                                << 
 52 #define MOVE_LASTALIGNCHUNK(src, dst, offset,  << 
 53         ldd     [%src - (offset) - 0x10], %t0; << 
 54         ldd     [%src - (offset) - 0x08], %t2; << 
 55         std     %t0, [%dst - (offset) - 0x10]; << 
 56         std     %t2, [%dst - (offset) - 0x08]; << 
 57                                                << 
 58 #define MOVE_SHORTCHUNK(src, dst, offset, t0,  << 
 59         ldub    [%src - (offset) - 0x02], %t0; << 
 60         ldub    [%src - (offset) - 0x01], %t1; << 
 61         stb     %t0, [%dst - (offset) - 0x02]; << 
 62         stb     %t1, [%dst - (offset) - 0x01]; << 
 63                                                    73 
 64         .text                                  !!  74 /*
 65         .align  4                              !!  75  * The exception handler for loads requires that:
                                                   >>  76  *  1- AT contain the address of the byte just past the end of the source
                                                   >>  77  *     of the copy,
                                                   >>  78  *  2- src_entry <= src < AT, and
                                                   >>  79  *  3- (dst - src) == (dst_entry - src_entry),
                                                   >>  80  * The _entry suffix denotes values when __copy_user was called.
                                                   >>  81  *
                                                   >>  82  * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
                                                   >>  83  * (2) is met by incrementing src by the number of bytes copied
                                                   >>  84  * (3) is met by not doing loads between a pair of increments of dst and src
                                                   >>  85  *
                                                   >>  86  * The exception handlers for stores adjust len (if necessary) and return.
                                                   >>  87  * These handlers do not need to overwrite any data.
                                                   >>  88  *
                                                   >>  89  * For __rmemcpy and memmove an exception is always a kernel bug, therefore
                                                   >>  90  * they're not protected.
                                                   >>  91  */
 66                                                    92 
 67 FUNC(memmove)                                  !!  93 /* Instruction type */
 68 EXPORT_SYMBOL(memmove)                         !!  94 #define LD_INSN 1
 69         cmp             %o0, %o1               !!  95 #define ST_INSN 2
 70         mov             %o0, %g7               !!  96 /* Prefetch type */
 71         bleu            9f                     !!  97 #define SRC_PREFETCH 1
 72          sub            %o0, %o1, %o4          !!  98 #define DST_PREFETCH 2
 73                                                !!  99 #define LEGACY_MODE 1
 74         add             %o1, %o2, %o3          !! 100 #define EVA_MODE    2
 75         cmp             %o3, %o0               !! 101 #define USEROP   1
 76         bleu            0f                     !! 102 #define KERNELOP 2
 77          andcc          %o4, 3, %o5            !! 103 
 78                                                !! 104 /*
 79         add             %o1, %o2, %o1          !! 105  * Wrapper to add an entry in the exception table
 80         add             %o0, %o2, %o0          !! 106  * in case the insn causes a memory exception.
 81         sub             %o1, 1, %o1            !! 107  * Arguments:
 82         sub             %o0, 1, %o0            !! 108  * insn    : Load/store instruction
 83                                                !! 109  * type    : Instruction type
 84 1:      /* reverse_bytes */                    !! 110  * reg     : Register
 85                                                !! 111  * addr    : Address
 86         ldub            [%o1], %o4             !! 112  * handler : Exception handler
 87         subcc           %o2, 1, %o2            !! 113  */
 88         stb             %o4, [%o0]             << 
 89         sub             %o1, 1, %o1            << 
 90         bne             1b                     << 
 91          sub            %o0, 1, %o0            << 
 92                                                << 
 93         retl                                   << 
 94          mov            %g7, %o0               << 
 95                                                << 
 96 /* NOTE: This code is executed just for the ca << 
 97          where %src (=%o1) & 3 is != 0.        << 
 98          We need to align it to 4. So, for (%s << 
 99          1 we need to do ldub,lduh             << 
100          2 lduh                                << 
101          3 just ldub                           << 
102          so even if it looks weird, the branch << 
103          are correct here. -jj                 << 
104  */                                            << 
105 78:     /* dword_align */                      << 
106                                                << 
107         andcc           %o1, 1, %g0            << 
108         be              4f                     << 
109          andcc          %o1, 2, %g0            << 
110                                                << 
111         ldub            [%o1], %g2             << 
112         add             %o1, 1, %o1            << 
113         stb             %g2, [%o0]             << 
114         sub             %o2, 1, %o2            << 
115         bne             3f                     << 
116          add            %o0, 1, %o0            << 
117 4:                                             << 
118         lduh            [%o1], %g2             << 
119         add             %o1, 2, %o1            << 
120         sth             %g2, [%o0]             << 
121         sub             %o2, 2, %o2            << 
122         b               3f                     << 
123          add            %o0, 2, %o0            << 
124                                                   114 
125 FUNC(memcpy)    /* %o0=dst %o1=src %o2=len */  !! 115 #define EXC(insn, type, reg, addr, handler)                     \
126 EXPORT_SYMBOL(memcpy)                          !! 116         .if \mode == LEGACY_MODE;                               \
                                                   >> 117 9:              insn reg, addr;                                 \
                                                   >> 118                 .section __ex_table,"a";                        \
                                                   >> 119                 PTR_WD  9b, handler;                            \
                                                   >> 120                 .previous;                                      \
                                                   >> 121         /* This is assembled in EVA mode */                     \
                                                   >> 122         .else;                                                  \
                                                   >> 123                 /* If loading from user or storing to user */   \
                                                   >> 124                 .if ((\from == USEROP) && (type == LD_INSN)) || \
                                                   >> 125                     ((\to == USEROP) && (type == ST_INSN));     \
                                                   >> 126 9:                      __BUILD_EVA_INSN(insn##e, reg, addr);   \
                                                   >> 127                         .section __ex_table,"a";                \
                                                   >> 128                         PTR_WD  9b, handler;                    \
                                                   >> 129                         .previous;                              \
                                                   >> 130                 .else;                                          \
                                                   >> 131                         /*                                      \
                                                   >> 132                          *  Still in EVA, but no need for       \
                                                   >> 133                          * exception handler or EVA insn        \
                                                   >> 134                          */                                     \
                                                   >> 135                         insn reg, addr;                         \
                                                   >> 136                 .endif;                                         \
                                                   >> 137         .endif
127                                                   138 
128         sub             %o0, %o1, %o4          !! 139 /*
129         mov             %o0, %g7               !! 140  * Only on the 64-bit kernel we can made use of 64-bit registers.
130 9:                                             !! 141  */
131         andcc           %o4, 3, %o5            !! 142 #ifdef CONFIG_64BIT
132 0:                                             !! 143 #define USE_DOUBLE
133         bne             86f                    !! 144 #endif
134          cmp            %o2, 15                !! 145 
135                                                !! 146 #ifdef USE_DOUBLE
136         bleu            90f                    !! 147 
137          andcc          %o1, 3, %g0            !! 148 #define LOADK ld /* No exception */
138                                                !! 149 #define LOAD(reg, addr, handler)        EXC(ld, LD_INSN, reg, addr, handler)
139         bne             78b                    !! 150 #define LOADL(reg, addr, handler)       EXC(ldl, LD_INSN, reg, addr, handler)
140 3:                                             !! 151 #define LOADR(reg, addr, handler)       EXC(ldr, LD_INSN, reg, addr, handler)
141          andcc          %o1, 4, %g0            !! 152 #define STOREL(reg, addr, handler)      EXC(sdl, ST_INSN, reg, addr, handler)
142                                                !! 153 #define STORER(reg, addr, handler)      EXC(sdr, ST_INSN, reg, addr, handler)
143         be              2f                     !! 154 #define STORE(reg, addr, handler)       EXC(sd, ST_INSN, reg, addr, handler)
144          mov            %o2, %g1               !! 155 #define ADD    daddu
145                                                !! 156 #define SUB    dsubu
146         ld              [%o1], %o4             !! 157 #define SRL    dsrl
147         sub             %g1, 4, %g1            !! 158 #define SRA    dsra
148         st              %o4, [%o0]             !! 159 #define SLL    dsll
149         add             %o1, 4, %o1            !! 160 #define SLLV   dsllv
150         add             %o0, 4, %o0            !! 161 #define SRLV   dsrlv
151 2:                                             !! 162 #define NBYTES 8
152         andcc           %g1, 0xffffff80, %g0   !! 163 #define LOG_NBYTES 3
153         be              3f                     !! 164 
154          andcc          %o0, 4, %g0            !! 165 /*
155                                                !! 166  * As we are sharing code base with the mips32 tree (which use the o32 ABI
156         be              82f + 4                !! 167  * register definitions). We need to redefine the register definitions from
157 5:                                             !! 168  * the n64 ABI register naming to the o32 ABI register naming.
158         MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4 !! 169  */
159         MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4 !! 170 #undef t0
160         MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4 !! 171 #undef t1
161         MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4 !! 172 #undef t2
162         sub             %g1, 128, %g1          !! 173 #undef t3
163         add             %o1, 128, %o1          !! 174 #define t0      $8
164         cmp             %g1, 128               !! 175 #define t1      $9
165         bge             5b                     !! 176 #define t2      $10
166          add            %o0, 128, %o0          !! 177 #define t3      $11
167 3:                                             !! 178 #define t4      $12
168         andcc           %g1, 0x70, %g4         !! 179 #define t5      $13
169         be              80f                    !! 180 #define t6      $14
170          andcc          %g1, 8, %g0            !! 181 #define t7      $15
171                                                !! 182 
172         sethi           %hi(80f), %o5          !! 183 #else
173         srl             %g4, 1, %o4            !! 184 
174         add             %g4, %o4, %o4          !! 185 #define LOADK lw /* No exception */
175         add             %o1, %g4, %o1          !! 186 #define LOAD(reg, addr, handler)        EXC(lw, LD_INSN, reg, addr, handler)
176         sub             %o5, %o4, %o5          !! 187 #define LOADL(reg, addr, handler)       EXC(lwl, LD_INSN, reg, addr, handler)
177         jmpl            %o5 + %lo(80f), %g0    !! 188 #define LOADR(reg, addr, handler)       EXC(lwr, LD_INSN, reg, addr, handler)
178          add            %o0, %g4, %o0          !! 189 #define STOREL(reg, addr, handler)      EXC(swl, ST_INSN, reg, addr, handler)
179                                                !! 190 #define STORER(reg, addr, handler)      EXC(swr, ST_INSN, reg, addr, handler)
180 79:     /* memcpy_table */                     !! 191 #define STORE(reg, addr, handler)       EXC(sw, ST_INSN, reg, addr, handler)
181                                                !! 192 #define ADD    addu
182         MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g !! 193 #define SUB    subu
183         MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g !! 194 #define SRL    srl
184         MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g !! 195 #define SLL    sll
185         MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g !! 196 #define SRA    sra
186         MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g !! 197 #define SLLV   sllv
187         MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g !! 198 #define SRLV   srlv
188         MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g !! 199 #define NBYTES 4
189                                                !! 200 #define LOG_NBYTES 2
190 80:     /* memcpy_table_end */                 !! 201 
191         be              81f                    !! 202 #endif /* USE_DOUBLE */
192          andcc          %g1, 4, %g0            !! 203 
193                                                !! 204 #define LOADB(reg, addr, handler)       EXC(lb, LD_INSN, reg, addr, handler)
194         ldd             [%o1], %g2             !! 205 #define STOREB(reg, addr, handler)      EXC(sb, ST_INSN, reg, addr, handler)
195         add             %o0, 8, %o0            !! 206 
196         st              %g2, [%o0 - 0x08]      !! 207 #ifdef CONFIG_CPU_HAS_PREFETCH
197         add             %o1, 8, %o1            !! 208 # define _PREF(hint, addr, type)                                        \
198         st              %g3, [%o0 - 0x04]      !! 209         .if \mode == LEGACY_MODE;                                       \
199                                                !! 210                 kernel_pref(hint, addr);                                \
200 81:     /* memcpy_last7 */                     !! 211         .else;                                                          \
201                                                !! 212                 .if ((\from == USEROP) && (type == SRC_PREFETCH)) ||    \
202         be              1f                     !! 213                     ((\to == USEROP) && (type == DST_PREFETCH));        \
203          andcc          %g1, 2, %g0            !! 214                         /*                                              \
204                                                !! 215                          * PREFE has only 9 bits for the offset         \
205         ld              [%o1], %g2             !! 216                          * compared to PREF which has 16, so it may     \
206         add             %o1, 4, %o1            !! 217                          * need to use the $at register but this        \
207         st              %g2, [%o0]             !! 218                          * register should remain intact because it's   \
208         add             %o0, 4, %o0            !! 219                          * used later on. Therefore use $v1.            \
209 1:                                             !! 220                          */                                             \
210         be              1f                     !! 221                         .set at=v1;                                     \
211          andcc          %g1, 1, %g0            !! 222                         user_pref(hint, addr);                          \
                                                   >> 223                         .set noat;                                      \
                                                   >> 224                 .else;                                                  \
                                                   >> 225                         kernel_pref(hint, addr);                        \
                                                   >> 226                 .endif;                                                 \
                                                   >> 227         .endif
                                                   >> 228 #else
                                                   >> 229 # define _PREF(hint, addr, type)
                                                   >> 230 #endif
                                                   >> 231 
                                                   >> 232 #define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
                                                   >> 233 #define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
                                                   >> 234 
                                                   >> 235 #ifdef CONFIG_CPU_LITTLE_ENDIAN
                                                   >> 236 #define LDFIRST LOADR
                                                   >> 237 #define LDREST  LOADL
                                                   >> 238 #define STFIRST STORER
                                                   >> 239 #define STREST  STOREL
                                                   >> 240 #define SHIFT_DISCARD SLLV
                                                   >> 241 #else
                                                   >> 242 #define LDFIRST LOADL
                                                   >> 243 #define LDREST  LOADR
                                                   >> 244 #define STFIRST STOREL
                                                   >> 245 #define STREST  STORER
                                                   >> 246 #define SHIFT_DISCARD SRLV
                                                   >> 247 #endif
                                                   >> 248 
                                                   >> 249 #define FIRST(unit) ((unit)*NBYTES)
                                                   >> 250 #define REST(unit)  (FIRST(unit)+NBYTES-1)
                                                   >> 251 #define UNIT(unit)  FIRST(unit)
212                                                   252 
213         lduh            [%o1], %g2             !! 253 #define ADDRMASK (NBYTES-1)
214         add             %o1, 2, %o1            << 
215         sth             %g2, [%o0]             << 
216         add             %o0, 2, %o0            << 
217 1:                                             << 
218         be              1f                     << 
219          nop                                   << 
220                                                   254 
221         ldub            [%o1], %g2             !! 255         .text
222         stb             %g2, [%o0]             !! 256         .set    noreorder
                                                   >> 257 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
                                                   >> 258         .set    noat
                                                   >> 259 #else
                                                   >> 260         .set    at=v1
                                                   >> 261 #endif
                                                   >> 262 
                                                   >> 263         .align  5
                                                   >> 264 
                                                   >> 265         /*
                                                   >> 266          * Macro to build the __copy_user common code
                                                   >> 267          * Arguments:
                                                   >> 268          * mode : LEGACY_MODE or EVA_MODE
                                                   >> 269          * from : Source operand. USEROP or KERNELOP
                                                   >> 270          * to   : Destination operand. USEROP or KERNELOP
                                                   >> 271          */
                                                   >> 272         .macro __BUILD_COPY_USER mode, from, to
                                                   >> 273 
                                                   >> 274         /* initialize __memcpy if this the first time we execute this macro */
                                                   >> 275         .ifnotdef __memcpy
                                                   >> 276         .set __memcpy, 1
                                                   >> 277         .hidden __memcpy /* make sure it does not leak */
                                                   >> 278         .endif
                                                   >> 279 
                                                   >> 280         /*
                                                   >> 281          * Note: dst & src may be unaligned, len may be 0
                                                   >> 282          * Temps
                                                   >> 283          */
                                                   >> 284 #define rem t8
                                                   >> 285 
                                                   >> 286         R10KCBARRIER(0(ra))
                                                   >> 287         /*
                                                   >> 288          * The "issue break"s below are very approximate.
                                                   >> 289          * Issue delays for dcache fills will perturb the schedule, as will
                                                   >> 290          * load queue full replay traps, etc.
                                                   >> 291          *
                                                   >> 292          * If len < NBYTES use byte operations.
                                                   >> 293          */
                                                   >> 294         PREFS(  0, 0(src) )
                                                   >> 295         PREFD(  1, 0(dst) )
                                                   >> 296         sltu    t2, len, NBYTES
                                                   >> 297         and     t1, dst, ADDRMASK
                                                   >> 298         PREFS(  0, 1*32(src) )
                                                   >> 299         PREFD(  1, 1*32(dst) )
                                                   >> 300         bnez    t2, .Lcopy_bytes_checklen\@
                                                   >> 301          and    t0, src, ADDRMASK
                                                   >> 302         PREFS(  0, 2*32(src) )
                                                   >> 303         PREFD(  1, 2*32(dst) )
                                                   >> 304 #ifndef CONFIG_CPU_NO_LOAD_STORE_LR
                                                   >> 305         bnez    t1, .Ldst_unaligned\@
                                                   >> 306          nop
                                                   >> 307         bnez    t0, .Lsrc_unaligned_dst_aligned\@
                                                   >> 308 #else /* CONFIG_CPU_NO_LOAD_STORE_LR */
                                                   >> 309         or      t0, t0, t1
                                                   >> 310         bnez    t0, .Lcopy_unaligned_bytes\@
                                                   >> 311 #endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
                                                   >> 312         /*
                                                   >> 313          * use delay slot for fall-through
                                                   >> 314          * src and dst are aligned; need to compute rem
                                                   >> 315          */
                                                   >> 316 .Lboth_aligned\@:
                                                   >> 317          SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
                                                   >> 318         beqz    t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
                                                   >> 319          and    rem, len, (8*NBYTES-1)   # rem = len % (8*NBYTES)
                                                   >> 320         PREFS(  0, 3*32(src) )
                                                   >> 321         PREFD(  1, 3*32(dst) )
                                                   >> 322         .align  4
223 1:                                                323 1:
224         retl                                   !! 324         R10KCBARRIER(0(ra))
225          mov            %g7, %o0               !! 325         LOAD(t0, UNIT(0)(src), .Ll_exc\@)
                                                   >> 326         LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
                                                   >> 327         LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
                                                   >> 328         LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
                                                   >> 329         SUB     len, len, 8*NBYTES
                                                   >> 330         LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
                                                   >> 331         LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
                                                   >> 332         STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
                                                   >> 333         STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
                                                   >> 334         LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
                                                   >> 335         LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
                                                   >> 336         ADD     src, src, 8*NBYTES
                                                   >> 337         ADD     dst, dst, 8*NBYTES
                                                   >> 338         STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
                                                   >> 339         STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
                                                   >> 340         STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
                                                   >> 341         STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
                                                   >> 342         STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
                                                   >> 343         STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
                                                   >> 344         PREFS(  0, 8*32(src) )
                                                   >> 345         PREFD(  1, 8*32(dst) )
                                                   >> 346         bne     len, rem, 1b
                                                   >> 347          nop
226                                                   348 
227 82:     /* ldd_std */                          !! 349         /*
228         MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o !! 350          * len == rem == the number of bytes left to copy < 8*NBYTES
229         MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o !! 351          */
230         MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o !! 352 .Lcleanup_both_aligned\@:
231         MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o !! 353         beqz    len, .Ldone\@
232         subcc           %g1, 128, %g1          !! 354          sltu   t0, len, 4*NBYTES
233         add             %o1, 128, %o1          !! 355         bnez    t0, .Lless_than_4units\@
234         cmp             %g1, 128               !! 356          and    rem, len, (NBYTES-1)    # rem = len % NBYTES
235         bge             82b                    !! 357         /*
236          add            %o0, 128, %o0          !! 358          * len >= 4*NBYTES
237                                                !! 359          */
238         andcc           %g1, 0x70, %g4         !! 360         LOAD( t0, UNIT(0)(src), .Ll_exc\@)
239         be              84f                    !! 361         LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
240          andcc          %g1, 8, %g0            !! 362         LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
241                                                !! 363         LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
242         sethi           %hi(84f), %o5          !! 364         SUB     len, len, 4*NBYTES
243         add             %o1, %g4, %o1          !! 365         ADD     src, src, 4*NBYTES
244         sub             %o5, %g4, %o5          !! 366         R10KCBARRIER(0(ra))
245         jmpl            %o5 + %lo(84f), %g0    !! 367         STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
246          add            %o0, %g4, %o0          !! 368         STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
247                                                !! 369         STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
248 83:     /* amemcpy_table */                    !! 370         STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
249                                                !! 371         .set    reorder                         /* DADDI_WAR */
250         MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2,  !! 372         ADD     dst, dst, 4*NBYTES
251         MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2,  !! 373         beqz    len, .Ldone\@
252         MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2,  !! 374         .set    noreorder
253         MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2,  !! 375 .Lless_than_4units\@:
254         MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2,  !! 376         /*
255         MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2,  !! 377          * rem = len % NBYTES
256         MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2,  !! 378          */
257                                                !! 379         beq     rem, len, .Lcopy_bytes\@
258 84:     /* amemcpy_table_end */                !! 380          nop
259         be              85f                    << 
260          andcc          %g1, 4, %g0            << 
261                                                << 
262         ldd             [%o1], %g2             << 
263         add             %o0, 8, %o0            << 
264         std             %g2, [%o0 - 0x08]      << 
265         add             %o1, 8, %o1            << 
266 85:     /* amemcpy_last7 */                    << 
267         be              1f                     << 
268          andcc          %g1, 2, %g0            << 
269                                                << 
270         ld              [%o1], %g2             << 
271         add             %o1, 4, %o1            << 
272         st              %g2, [%o0]             << 
273         add             %o0, 4, %o0            << 
274 1:                                                381 1:
275         be              1f                     !! 382         R10KCBARRIER(0(ra))
276          andcc          %g1, 1, %g0            !! 383         LOAD(t0, 0(src), .Ll_exc\@)
277                                                !! 384         ADD     src, src, NBYTES
278         lduh            [%o1], %g2             !! 385         SUB     len, len, NBYTES
279         add             %o1, 2, %o1            !! 386         STORE(t0, 0(dst), .Ls_exc_p1u\@)
280         sth             %g2, [%o0]             !! 387         .set    reorder                         /* DADDI_WAR */
281         add             %o0, 2, %o0            !! 388         ADD     dst, dst, NBYTES
                                                   >> 389         bne     rem, len, 1b
                                                   >> 390         .set    noreorder
                                                   >> 391 
                                                   >> 392 #ifndef CONFIG_CPU_NO_LOAD_STORE_LR
                                                   >> 393         /*
                                                   >> 394          * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
                                                   >> 395          * A loop would do only a byte at a time with possible branch
                                                   >> 396          * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
                                                   >> 397          * because can't assume read-access to dst.  Instead, use
                                                   >> 398          * STREST dst, which doesn't require read access to dst.
                                                   >> 399          *
                                                   >> 400          * This code should perform better than a simple loop on modern,
                                                   >> 401          * wide-issue mips processors because the code has fewer branches and
                                                   >> 402          * more instruction-level parallelism.
                                                   >> 403          */
                                                   >> 404 #define bits t2
                                                   >> 405         beqz    len, .Ldone\@
                                                   >> 406          ADD    t1, dst, len    # t1 is just past last byte of dst
                                                   >> 407         li      bits, 8*NBYTES
                                                   >> 408         SLL     rem, len, 3     # rem = number of bits to keep
                                                   >> 409         LOAD(t0, 0(src), .Ll_exc\@)
                                                   >> 410         SUB     bits, bits, rem # bits = number of bits to discard
                                                   >> 411         SHIFT_DISCARD t0, t0, bits
                                                   >> 412         STREST(t0, -1(t1), .Ls_exc\@)
                                                   >> 413         jr      ra
                                                   >> 414          move   len, zero
                                                   >> 415 .Ldst_unaligned\@:
                                                   >> 416         /*
                                                   >> 417          * dst is unaligned
                                                   >> 418          * t0 = src & ADDRMASK
                                                   >> 419          * t1 = dst & ADDRMASK; T1 > 0
                                                   >> 420          * len >= NBYTES
                                                   >> 421          *
                                                   >> 422          * Copy enough bytes to align dst
                                                   >> 423          * Set match = (src and dst have same alignment)
                                                   >> 424          */
                                                   >> 425 #define match rem
                                                   >> 426         LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
                                                   >> 427         ADD     t2, zero, NBYTES
                                                   >> 428         LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
                                                   >> 429         SUB     t2, t2, t1      # t2 = number of bytes copied
                                                   >> 430         xor     match, t0, t1
                                                   >> 431         R10KCBARRIER(0(ra))
                                                   >> 432         STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
                                                   >> 433         beq     len, t2, .Ldone\@
                                                   >> 434          SUB    len, len, t2
                                                   >> 435         ADD     dst, dst, t2
                                                   >> 436         beqz    match, .Lboth_aligned\@
                                                   >> 437          ADD    src, src, t2
                                                   >> 438 
                                                   >> 439 .Lsrc_unaligned_dst_aligned\@:
                                                   >> 440         SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
                                                   >> 441         PREFS(  0, 3*32(src) )
                                                   >> 442         beqz    t0, .Lcleanup_src_unaligned\@
                                                   >> 443          and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
                                                   >> 444         PREFD(  1, 3*32(dst) )
282 1:                                                445 1:
283         be              1f                     !! 446 /*
                                                   >> 447  * Avoid consecutive LD*'s to the same register since some mips
                                                   >> 448  * implementations can't issue them in the same cycle.
                                                   >> 449  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
                                                   >> 450  * are to the same unit (unless src is aligned, but it's not).
                                                   >> 451  */
                                                   >> 452         R10KCBARRIER(0(ra))
                                                   >> 453         LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
                                                   >> 454         LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
                                                   >> 455         SUB     len, len, 4*NBYTES
                                                   >> 456         LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
                                                   >> 457         LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
                                                   >> 458         LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
                                                   >> 459         LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
                                                   >> 460         LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
                                                   >> 461         LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
                                                   >> 462         PREFS(  0, 9*32(src) )          # 0 is PREF_LOAD  (not streamed)
                                                   >> 463         ADD     src, src, 4*NBYTES
                                                   >> 464 #ifdef CONFIG_CPU_SB1
                                                   >> 465         nop                             # improves slotting
                                                   >> 466 #endif
                                                   >> 467         STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
                                                   >> 468         STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
                                                   >> 469         STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
                                                   >> 470         STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
                                                   >> 471         PREFD(  1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
                                                   >> 472         .set    reorder                         /* DADDI_WAR */
                                                   >> 473         ADD     dst, dst, 4*NBYTES
                                                   >> 474         bne     len, rem, 1b
                                                   >> 475         .set    noreorder
                                                   >> 476 
                                                   >> 477 .Lcleanup_src_unaligned\@:
                                                   >> 478         beqz    len, .Ldone\@
                                                   >> 479          and    rem, len, NBYTES-1  # rem = len % NBYTES
                                                   >> 480         beq     rem, len, .Lcopy_bytes\@
284          nop                                      481          nop
285                                                << 
286         ldub            [%o1], %g2             << 
287         stb             %g2, [%o0]             << 
288 1:                                                482 1:
289         retl                                   !! 483         R10KCBARRIER(0(ra))
290          mov            %g7, %o0               !! 484         LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
                                                   >> 485         LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
                                                   >> 486         ADD     src, src, NBYTES
                                                   >> 487         SUB     len, len, NBYTES
                                                   >> 488         STORE(t0, 0(dst), .Ls_exc_p1u\@)
                                                   >> 489         .set    reorder                         /* DADDI_WAR */
                                                   >> 490         ADD     dst, dst, NBYTES
                                                   >> 491         bne     len, rem, 1b
                                                   >> 492         .set    noreorder
                                                   >> 493 
                                                   >> 494 #endif /* !CONFIG_CPU_NO_LOAD_STORE_LR */
                                                   >> 495 .Lcopy_bytes_checklen\@:
                                                   >> 496         beqz    len, .Ldone\@
                                                   >> 497          nop
                                                   >> 498 .Lcopy_bytes\@:
                                                   >> 499         /* 0 < len < NBYTES  */
                                                   >> 500         R10KCBARRIER(0(ra))
                                                   >> 501 #define COPY_BYTE(N)                    \
                                                   >> 502         LOADB(t0, N(src), .Ll_exc\@);   \
                                                   >> 503         SUB     len, len, 1;            \
                                                   >> 504         beqz    len, .Ldone\@;          \
                                                   >> 505         STOREB(t0, N(dst), .Ls_exc_p1\@)
                                                   >> 506 
                                                   >> 507         COPY_BYTE(0)
                                                   >> 508         COPY_BYTE(1)
                                                   >> 509 #ifdef USE_DOUBLE
                                                   >> 510         COPY_BYTE(2)
                                                   >> 511         COPY_BYTE(3)
                                                   >> 512         COPY_BYTE(4)
                                                   >> 513         COPY_BYTE(5)
                                                   >> 514 #endif
                                                   >> 515         LOADB(t0, NBYTES-2(src), .Ll_exc\@)
                                                   >> 516         SUB     len, len, 1
                                                   >> 517         jr      ra
                                                   >> 518         STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
                                                   >> 519 .Ldone\@:
                                                   >> 520         jr      ra
                                                   >> 521          nop
291                                                   522 
292 86:     /* non_aligned */                      !! 523 #ifdef CONFIG_CPU_NO_LOAD_STORE_LR
293         cmp             %o2, 6                 !! 524 .Lcopy_unaligned_bytes\@:
294         bleu            88f                    << 
295          nop                                   << 
296                                                << 
297         save            %sp, -96, %sp          << 
298         andcc           %i0, 3, %g0            << 
299         be              61f                    << 
300          andcc          %i0, 1, %g0            << 
301         be              60f                    << 
302          andcc          %i0, 2, %g0            << 
303                                                << 
304         ldub            [%i1], %g5             << 
305         add             %i1, 1, %i1            << 
306         stb             %g5, [%i0]             << 
307         sub             %i2, 1, %i2            << 
308         bne             61f                    << 
309          add            %i0, 1, %i0            << 
310 60:                                            << 
311         ldub            [%i1], %g3             << 
312         add             %i1, 2, %i1            << 
313         stb             %g3, [%i0]             << 
314         sub             %i2, 2, %i2            << 
315         ldub            [%i1 - 1], %g3         << 
316         add             %i0, 2, %i0            << 
317         stb             %g3, [%i0 - 1]         << 
318 61:                                            << 
319         and             %i1, 3, %g2            << 
320         and             %i2, 0xc, %g3          << 
321         and             %i1, -4, %i1           << 
322         cmp             %g3, 4                 << 
323         sll             %g2, 3, %g4            << 
324         mov             32, %g2                << 
325         be              4f                     << 
326          sub            %g2, %g4, %l0          << 
327                                                << 
328         blu             3f                     << 
329          cmp            %g3, 0x8               << 
330                                                << 
331         be              2f                     << 
332          srl            %i2, 2, %g3            << 
333                                                << 
334         ld              [%i1], %i3             << 
335         add             %i0, -8, %i0           << 
336         ld              [%i1 + 4], %i4         << 
337         b               8f                     << 
338          add            %g3, 1, %g3            << 
339 2:                                             << 
340         ld              [%i1], %i4             << 
341         add             %i0, -12, %i0          << 
342         ld              [%i1 + 4], %i5         << 
343         add             %g3, 2, %g3            << 
344         b               9f                     << 
345          add            %i1, -4, %i1           << 
346 3:                                             << 
347         ld              [%i1], %g1             << 
348         add             %i0, -4, %i0           << 
349         ld              [%i1 + 4], %i3         << 
350         srl             %i2, 2, %g3            << 
351         b               7f                     << 
352          add            %i1, 4, %i1            << 
353 4:                                             << 
354         ld              [%i1], %i5             << 
355         cmp             %i2, 7                 << 
356         ld              [%i1 + 4], %g1         << 
357         srl             %i2, 2, %g3            << 
358         bleu            10f                    << 
359          add            %i1, 8, %i1            << 
360                                                << 
361         ld              [%i1], %i3             << 
362         add             %g3, -1, %g3           << 
363 5:                                             << 
364         sll             %i5, %g4, %g2          << 
365         srl             %g1, %l0, %g5          << 
366         or              %g2, %g5, %g2          << 
367         st              %g2, [%i0]             << 
368 7:                                             << 
369         ld              [%i1 + 4], %i4         << 
370         sll             %g1, %g4, %g2          << 
371         srl             %i3, %l0, %g5          << 
372         or              %g2, %g5, %g2          << 
373         st              %g2, [%i0 + 4]         << 
374 8:                                             << 
375         ld              [%i1 + 8], %i5         << 
376         sll             %i3, %g4, %g2          << 
377         srl             %i4, %l0, %g5          << 
378         or              %g2, %g5, %g2          << 
379         st              %g2, [%i0 + 8]         << 
380 9:                                             << 
381         ld              [%i1 + 12], %g1        << 
382         sll             %i4, %g4, %g2          << 
383         srl             %i5, %l0, %g5          << 
384         addcc           %g3, -4, %g3           << 
385         or              %g2, %g5, %g2          << 
386         add             %i1, 16, %i1           << 
387         st              %g2, [%i0 + 12]        << 
388         add             %i0, 16, %i0           << 
389         bne,a           5b                     << 
390          ld             [%i1], %i3             << 
391 10:                                            << 
392         sll             %i5, %g4, %g2          << 
393         srl             %g1, %l0, %g5          << 
394         srl             %l0, 3, %g3            << 
395         or              %g2, %g5, %g2          << 
396         sub             %i1, %g3, %i1          << 
397         andcc           %i2, 2, %g0            << 
398         st              %g2, [%i0]             << 
399         be              1f                     << 
400          andcc          %i2, 1, %g0            << 
401                                                << 
402         ldub            [%i1], %g2             << 
403         add             %i1, 2, %i1            << 
404         stb             %g2, [%i0 + 4]         << 
405         add             %i0, 2, %i0            << 
406         ldub            [%i1 - 1], %g2         << 
407         stb             %g2, [%i0 + 3]         << 
408 1:                                                525 1:
409         be              1f                     !! 526         COPY_BYTE(0)
                                                   >> 527         COPY_BYTE(1)
                                                   >> 528         COPY_BYTE(2)
                                                   >> 529         COPY_BYTE(3)
                                                   >> 530         COPY_BYTE(4)
                                                   >> 531         COPY_BYTE(5)
                                                   >> 532         COPY_BYTE(6)
                                                   >> 533         COPY_BYTE(7)
                                                   >> 534         ADD     src, src, 8
                                                   >> 535         b       1b
                                                   >> 536          ADD    dst, dst, 8
                                                   >> 537 #endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
                                                   >> 538         .if __memcpy == 1
                                                   >> 539         END(memcpy)
                                                   >> 540         .set __memcpy, 0
                                                   >> 541         .hidden __memcpy
                                                   >> 542         .endif
                                                   >> 543 
                                                   >> 544 .Ll_exc_copy\@:
                                                   >> 545         /*
                                                   >> 546          * Copy bytes from src until faulting load address (or until a
                                                   >> 547          * lb faults)
                                                   >> 548          *
                                                   >> 549          * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
                                                   >> 550          * may be more than a byte beyond the last address.
                                                   >> 551          * Hence, the lb below may get an exception.
                                                   >> 552          *
                                                   >> 553          * Assumes src < THREAD_BUADDR($28)
                                                   >> 554          */
                                                   >> 555         LOADK   t0, TI_TASK($28)
410          nop                                      556          nop
411         ldub            [%i1], %g2             !! 557         LOADK   t0, THREAD_BUADDR(t0)
412         stb             %g2, [%i0 + 4]         << 
413 1:                                                558 1:
414         ret                                    !! 559         LOADB(t1, 0(src), .Ll_exc\@)
415          restore        %g7, %g0, %o0          !! 560         ADD     src, src, 1
                                                   >> 561         sb      t1, 0(dst)      # can't fault -- we're copy_from_user
                                                   >> 562         .set    reorder                         /* DADDI_WAR */
                                                   >> 563         ADD     dst, dst, 1
                                                   >> 564         bne     src, t0, 1b
                                                   >> 565         .set    noreorder
                                                   >> 566 .Ll_exc\@:
                                                   >> 567         LOADK   t0, TI_TASK($28)
                                                   >> 568          nop
                                                   >> 569         LOADK   t0, THREAD_BUADDR(t0)   # t0 is just past last good address
                                                   >> 570          nop
                                                   >> 571         SUB     len, AT, t0             # len number of uncopied bytes
                                                   >> 572         jr      ra
                                                   >> 573          nop
416                                                   574 
417 88:     /* short_end */                        !! 575 #define SEXC(n)                                                 \
                                                   >> 576         .set    reorder;                        /* DADDI_WAR */ \
                                                   >> 577 .Ls_exc_p ## n ## u\@:                                          \
                                                   >> 578         ADD     len, len, n*NBYTES;                             \
                                                   >> 579         jr      ra;                                             \
                                                   >> 580         .set    noreorder
                                                   >> 581 
                                                   >> 582 SEXC(8)
                                                   >> 583 SEXC(7)
                                                   >> 584 SEXC(6)
                                                   >> 585 SEXC(5)
                                                   >> 586 SEXC(4)
                                                   >> 587 SEXC(3)
                                                   >> 588 SEXC(2)
                                                   >> 589 SEXC(1)
                                                   >> 590 
                                                   >> 591 .Ls_exc_p1\@:
                                                   >> 592         .set    reorder                         /* DADDI_WAR */
                                                   >> 593         ADD     len, len, 1
                                                   >> 594         jr      ra
                                                   >> 595         .set    noreorder
                                                   >> 596 .Ls_exc\@:
                                                   >> 597         jr      ra
                                                   >> 598          nop
                                                   >> 599         .endm
418                                                   600 
419         and             %o2, 0xe, %o3          !! 601 #ifndef CONFIG_HAVE_PLAT_MEMCPY
420 20:                                            !! 602         .align  5
421         sethi           %hi(89f), %o5          !! 603 LEAF(memmove)
422         sll             %o3, 3, %o4            !! 604 EXPORT_SYMBOL(memmove)
423         add             %o0, %o3, %o0          !! 605         ADD     t0, a0, a2
424         sub             %o5, %o4, %o5          !! 606         ADD     t1, a1, a2
425         add             %o1, %o3, %o1          !! 607         sltu    t0, a1, t0                      # dst + len <= src -> memcpy
426         jmpl            %o5 + %lo(89f), %g0    !! 608         sltu    t1, a0, t1                      # dst >= src + len -> memcpy
427          andcc          %o2, 1, %g0            !! 609         and     t0, t1
428                                                !! 610         beqz    t0, .L__memcpy
429         MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)  !! 611          move   v0, a0                          /* return value */
430         MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)  !! 612         beqz    a2, .Lr_out
431         MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)  !! 613         END(memmove)
432         MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)  !! 614 
433         MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)  !! 615         /* fall through to __rmemcpy */
434         MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)  !! 616 LEAF(__rmemcpy)                                 /* a0=dst a1=src a2=len */
435         MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)  !! 617          sltu   t0, a1, a0
                                                   >> 618         beqz    t0, .Lr_end_bytes_up            # src >= dst
                                                   >> 619          nop
                                                   >> 620         ADD     a0, a2                          # dst = dst + len
                                                   >> 621         ADD     a1, a2                          # src = src + len
436                                                   622 
437 89:     /* short_table_end */                  !! 623 .Lr_end_bytes:
                                                   >> 624         R10KCBARRIER(0(ra))
                                                   >> 625         lb      t0, -1(a1)
                                                   >> 626         SUB     a2, a2, 0x1
                                                   >> 627         sb      t0, -1(a0)
                                                   >> 628         SUB     a1, a1, 0x1
                                                   >> 629         .set    reorder                         /* DADDI_WAR */
                                                   >> 630         SUB     a0, a0, 0x1
                                                   >> 631         bnez    a2, .Lr_end_bytes
                                                   >> 632         .set    noreorder
                                                   >> 633 
                                                   >> 634 .Lr_out:
                                                   >> 635         jr      ra
                                                   >> 636          move   a2, zero
                                                   >> 637 
                                                   >> 638 .Lr_end_bytes_up:
                                                   >> 639         R10KCBARRIER(0(ra))
                                                   >> 640         lb      t0, (a1)
                                                   >> 641         SUB     a2, a2, 0x1
                                                   >> 642         sb      t0, (a0)
                                                   >> 643         ADD     a1, a1, 0x1
                                                   >> 644         .set    reorder                         /* DADDI_WAR */
                                                   >> 645         ADD     a0, a0, 0x1
                                                   >> 646         bnez    a2, .Lr_end_bytes_up
                                                   >> 647         .set    noreorder
                                                   >> 648 
                                                   >> 649         jr      ra
                                                   >> 650          move   a2, zero
                                                   >> 651         END(__rmemcpy)
                                                   >> 652 
                                                   >> 653 /*
                                                   >> 654  * A combined memcpy/__copy_user
                                                   >> 655  * __copy_user sets len to 0 for success; else to an upper bound of
                                                   >> 656  * the number of uncopied bytes.
                                                   >> 657  * memcpy sets v0 to dst.
                                                   >> 658  */
                                                   >> 659         .align  5
                                                   >> 660 LEAF(memcpy)                                    /* a0=dst a1=src a2=len */
                                                   >> 661 EXPORT_SYMBOL(memcpy)
                                                   >> 662         move    v0, dst                         /* return value */
                                                   >> 663 .L__memcpy:
                                                   >> 664 #ifndef CONFIG_EVA
                                                   >> 665 FEXPORT(__raw_copy_from_user)
                                                   >> 666 EXPORT_SYMBOL(__raw_copy_from_user)
                                                   >> 667 FEXPORT(__raw_copy_to_user)
                                                   >> 668 EXPORT_SYMBOL(__raw_copy_to_user)
                                                   >> 669 #endif
                                                   >> 670         /* Legacy Mode, user <-> user */
                                                   >> 671         __BUILD_COPY_USER LEGACY_MODE USEROP USEROP
                                                   >> 672 
                                                   >> 673 #endif
                                                   >> 674 
                                                   >> 675 #ifdef CONFIG_EVA
                                                   >> 676 
                                                   >> 677 /*
                                                   >> 678  * For EVA we need distinct symbols for reading and writing to user space.
                                                   >> 679  * This is because we need to use specific EVA instructions to perform the
                                                   >> 680  * virtual <-> physical translation when a virtual address is actually in user
                                                   >> 681  * space
                                                   >> 682  */
438                                                   683 
439         be              1f                     !! 684 /*
440          nop                                   !! 685  * __copy_from_user (EVA)
                                                   >> 686  */
441                                                   687 
442         ldub            [%o1], %g2             !! 688 LEAF(__raw_copy_from_user)
443         stb             %g2, [%o0]             !! 689 EXPORT_SYMBOL(__raw_copy_from_user)
444 1:                                             !! 690         __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
445         retl                                   !! 691 END(__raw_copy_from_user)
446          mov            %g7, %o0               << 
447                                                   692 
448 90:     /* short_aligned_end */                !! 693 
449         bne             88b                    !! 694 
450          andcc          %o2, 8, %g0            !! 695 /*
451                                                !! 696  * __copy_to_user (EVA)
452         be              1f                     !! 697  */
453          andcc          %o2, 4, %g0            !! 698 
454                                                !! 699 LEAF(__raw_copy_to_user)
455         ld              [%o1 + 0x00], %g2      !! 700 EXPORT_SYMBOL(__raw_copy_to_user)
456         ld              [%o1 + 0x04], %g3      !! 701 __BUILD_COPY_USER EVA_MODE KERNELOP USEROP
457         add             %o1, 8, %o1            !! 702 END(__raw_copy_to_user)
458         st              %g2, [%o0 + 0x00]      !! 703 
459         st              %g3, [%o0 + 0x04]      !! 704 #endif
460         add             %o0, 8, %o0            << 
461 1:                                             << 
462         b               81b                    << 
463          mov            %o2, %g1               << 
                                                      

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php