TOMOYO Linux Cross Reference
Linux/arch/riscv/lib/uaccess.S

#include <linux/linkage.h>
#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/alternative-macros.h>

        .macro fixup op reg addr lbl
100:
        \op \reg, \addr
        _asm_extable    100b, \lbl
        .endm
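        /*
         * The fixup macro above tags a single user access: _asm_extable
         * records an exception table entry from the access at label 100 to
         * the supplied fixup label, so a fault on that load/store branches
         * to the fixup code instead of being treated as a fatal fault.
         */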

SYM_FUNC_START(__asm_copy_to_user)
#ifdef CONFIG_RISCV_ISA_V
        ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_ZVE32X, CONFIG_RISCV_ISA_V)
        REG_L   t0, riscv_v_usercopy_threshold
        bltu    a2, t0, fallback_scalar_usercopy
        tail enter_vector_usercopy
#endif
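        /*
         * With CONFIG_RISCV_ISA_V, the ALTERNATIVE above is patched to a nop
         * on CPUs implementing ZVE32X, so copies of at least
         * riscv_v_usercopy_threshold bytes are handed to enter_vector_usercopy;
         * smaller copies, and all copies on other CPUs, use the scalar path.
         */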
SYM_FUNC_START(fallback_scalar_usercopy)

        /* Enable access to user memory */
        li t6, SR_SUM
        csrs CSR_STATUS, t6
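        /*
         * SR_SUM is the sstatus.SUM bit: while it is set, supervisor-mode
         * loads and stores may access user (U=1) pages. Every exit path
         * below clears it again with csrc before returning.
         */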

        /*
         * Save the terminal address, which is used to compute the number
         * of bytes left uncopied in case of a fixup exception.
         */
        add     t5, a0, a2

        /*
         * Register allocation for code below:
         * a0 - start of uncopied dst
         * a1 - start of uncopied src
         * a2 - size
         * t0 - end of uncopied dst
         */
        add     t0, a0, a2

        /*
         * Use byte copy only if too small.
         * SZREG holds 4 for RV32 and 8 for RV64
         */
        li      a3, 9*SZREG-1 /* size must be >= (word_copy stride + SZREG-1) */
        bltu    a2, a3, .Lbyte_copy_tail
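        /*
         * E.g. on RV64 (SZREG = 8) the threshold is 9*8-1 = 71, so requests
         * shorter than 71 bytes take the byte-copy path; on RV32 (SZREG = 4)
         * the cut-off is 35 bytes.
         */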

        /*
         * Copy first bytes until dst is aligned to word boundary.
         * a0 - start of dst
         * t1 - start of aligned dst
         */
        addi    t1, a0, SZREG-1
        andi    t1, t1, ~(SZREG-1)
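        /*
         * These two instructions round a0 up to the next SZREG boundary,
         * e.g. with SZREG = 8 a dst of 0x1003 yields t1 = 0x1008, while an
         * already aligned 0x1008 is left unchanged.
         */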
        /* dst is already aligned, skip */
        beq     a0, t1, .Lskip_align_dst
1:
        /* a5 - one byte for copying data */
        fixup lb      a5, 0(a1), 10f
        addi    a1, a1, 1       /* src */
        fixup sb      a5, 0(a0), 10f
        addi    a0, a0, 1       /* dst */
        bltu    a0, t1, 1b      /* t1 - start of aligned dst */

.Lskip_align_dst:
        /*
         * Now dst is aligned.
         * Use shift-copy if src is misaligned.
         * Use word-copy if both src and dst are aligned, since in that
         * case no shifting is needed.
         */
        /* a1 - start of src */
        andi    a3, a1, SZREG-1
        bnez    a3, .Lshift_copy

.Lword_copy:
        /*
         * Both src and dst are aligned, unrolled word copy
         *
         * a0 - start of aligned dst
         * a1 - start of aligned src
         * t0 - end of aligned dst
         */
        addi    t0, t0, -(8*SZREG) /* bias t0 down so the 8-word block cannot overrun dst */
2:
        fixup REG_L   a4,        0(a1), 10f
        fixup REG_L   a5,    SZREG(a1), 10f
        fixup REG_L   a6,  2*SZREG(a1), 10f
        fixup REG_L   a7,  3*SZREG(a1), 10f
        fixup REG_L   t1,  4*SZREG(a1), 10f
        fixup REG_L   t2,  5*SZREG(a1), 10f
        fixup REG_L   t3,  6*SZREG(a1), 10f
        fixup REG_L   t4,  7*SZREG(a1), 10f
        fixup REG_S   a4,        0(a0), 10f
        fixup REG_S   a5,    SZREG(a0), 10f
        fixup REG_S   a6,  2*SZREG(a0), 10f
        fixup REG_S   a7,  3*SZREG(a0), 10f
        fixup REG_S   t1,  4*SZREG(a0), 10f
        fixup REG_S   t2,  5*SZREG(a0), 10f
        fixup REG_S   t3,  6*SZREG(a0), 10f
        fixup REG_S   t4,  7*SZREG(a0), 10f
        addi    a0, a0, 8*SZREG
        addi    a1, a1, 8*SZREG
        bleu    a0, t0, 2b

        addi    t0, t0, 8*SZREG /* restore t0 to the real end of dst */
        j       .Lbyte_copy_tail

.Lshift_copy:

        /*
         * Word copy with shifting.
         * For a misaligned src we still perform aligned word loads, but each
         * stored word is assembled from the value fetched in the previous
         * iteration and the current one using shifts.
         * This is safe because any over-read is smaller than a word and stays
         * within an aligned word that also holds valid source bytes.
         *
         * a0 - start of aligned dst
         * a1 - start of src
         * a3 - a1 & (SZREG-1)
         * t0 - end of uncopied dst
         * t1 - end of aligned dst
         */
        /* Calculate the aligned word boundary for dst */
        andi    t1, t0, ~(SZREG-1)
        /* Align src down to a word boundary */
        andi    a1, a1, ~(SZREG-1)

        /*
         * Calculate shifts
         * t3 - prev shift
         * t4 - current shift
         */
        slli    t3, a3, 3 /* convert the byte offset in a3 to bits */
        li      a5, SZREG*8
        sub     t4, a5, t3
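        /*
         * Example: with SZREG = 8 and a source offset a3 = 3, t3 = 24 and
         * t4 = 40; since RISC-V is little-endian, each stored word is
         * (prev >> 24) | (next << 40).
         */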

        /* Load the first word to combine with second word */
        fixup REG_L   a5, 0(a1), 10f

3:
        /* Main shifting copy
         *
         * a0 - start of aligned dst
         * a1 - start of aligned src
         * t1 - end of aligned dst
         */

        /* At least one iteration will be executed */
        srl     a4, a5, t3
        fixup REG_L   a5, SZREG(a1), 10f
        addi    a1, a1, SZREG
        sll     a2, a5, t4
        or      a2, a2, a4
        fixup REG_S   a2, 0(a0), 10f
        addi    a0, a0, SZREG
        bltu    a0, t1, 3b

        /* Revert src to original unaligned value */
        add     a1, a1, a3

.Lbyte_copy_tail:
        /*
         * Byte copy anything left.
         *
         * a0 - start of remaining dst
         * a1 - start of remaining src
         * t0 - end of remaining dst
         */
        bgeu    a0, t0, .Lout_copy_user  /* check if end of copy */
4:
        fixup lb      a5, 0(a1), 10f
        addi    a1, a1, 1       /* src */
        fixup sb      a5, 0(a0), 10f
        addi    a0, a0, 1       /* dst */
        bltu    a0, t0, 4b      /* t0 - end of dst */

.Lout_copy_user:
        /* Disable access to user memory */
        csrc CSR_STATUS, t6
        li      a0, 0
        ret

        /* Exception fixup code */
10:
        /* Disable access to user memory */
        csrc CSR_STATUS, t6
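        /*
         * t5 is the end of dst, so t5 - a0 is the number of bytes that were
         * not copied; raw_copy_to_user()/raw_copy_from_user() expect that
         * count as the return value.
         */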
        sub a0, t5, a0
        ret
SYM_FUNC_END(__asm_copy_to_user)
SYM_FUNC_END(fallback_scalar_usercopy)
EXPORT_SYMBOL(__asm_copy_to_user)
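
/*
 * Both the loads and the stores above are wrapped in fixup, so a fault on
 * either the user source or the user destination is handled; the same body
 * therefore serves both directions and __asm_copy_from_user can simply be
 * an alias of __asm_copy_to_user.
 */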
SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)


SYM_FUNC_START(__clear_user)

        /* Enable access to user memory */
        li t6, SR_SUM
        csrs CSR_STATUS, t6

        add a3, a0, a1
        addi t0, a0, SZREG-1
        andi t1, a3, ~(SZREG-1)
        andi t0, t0, ~(SZREG-1)
        /*
         * a3: terminal address of target region
         * t0: lowest SZREG-aligned address in target region
         * t1: highest SZREG-aligned address in target region
         */
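        /*
         * Label 4 byte-clears the unaligned head, label 1 clears whole
         * SZREG-sized words, and label 5 byte-clears the remaining tail.
         */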
        bgeu t0, t1, 2f
        bltu a0, t0, 4f
1:
        fixup REG_S, zero, (a0), 11f
        addi a0, a0, SZREG
        bltu a0, t1, 1b
2:
        bltu a0, a3, 5f

3:
        /* Disable access to user memory */
        csrc CSR_STATUS, t6
        li a0, 0
        ret
4: /* Edge case: unaligned head bytes */
        fixup sb, zero, (a0), 11f
        addi a0, a0, 1
        bltu a0, t0, 4b
        j 1b
5: /* Edge case: remaining tail bytes */
        fixup sb, zero, (a0), 11f
        addi a0, a0, 1
        bltu a0, a3, 5b
        j 3b

        /* Exception fixup code */
11:
        /* Disable access to user memory */
        csrc CSR_STATUS, t6
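        /*
         * a3 is the end of the target region, so a3 - a0 is the number of
         * bytes that could not be cleared; clear_user() returns that count.
         */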
        sub a0, a3, a0
        ret
SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
