TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/uaccess_64.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
        asm (ALTERNATIVE("",
                         "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM)
             : [addr] "+r" (addr)
             : [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));

        return addr;
}

#define untagged_addr(addr)     ({                                      \
        unsigned long __addr = (__force unsigned long)(addr);           \
        (__force __typeof__(addr))__untagged_addr(__addr);              \
})
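
/*
 * Illustrative sketch (not part of the original header): with LAM_U57,
 * tlbstate_untag_mask clears the six tag bits 62:57, so untagged_addr()
 * maps a tagged user pointer back to its canonical form.  The values
 * below are made up for demonstration only.
 */
#if 0
        unsigned long mask   = ~GENMASK_ULL(62, 57);    /* 0x81ffffffffffffff */
        unsigned long tagged = (0x15UL << 57) | 0x00007f1234567000UL;
        unsigned long plain  = tagged & mask;           /* 0x00007f1234567000 */
#endif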

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
                                                   unsigned long addr)
{
        mmap_assert_locked(mm);
        return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr)  ({                              \
        unsigned long __addr = (__force unsigned long)(addr);           \
        (__force __typeof__(addr))__untagged_addr_remote(mm, __addr);   \
})
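
/*
 * Illustrative usage (not part of the original header): the _remote
 * variant reads the untag mask from the target mm rather than from
 * per-CPU state, so the caller must hold mmap_lock.  'mm' and 'uaddr'
 * are hypothetical.
 */
#if 0
        mmap_read_lock(mm);
        uaddr = untagged_addr_remote(mm, uaddr);
        /* ... operate on the canonical address ... */
        mmap_read_unlock(mm);
#endif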

#endif

/*
 * The virtual address space is logically divided into a kernel
 * half and a user half.  When cast to a signed type, user pointers
 * are positive and kernel pointers are negative.
 */
#define valid_user_address(x) ((__force long)(x) >= 0)
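
/*
 * Illustrative sketch (not part of the original header): on x86-64 the
 * user half is the lower canonical range and every kernel address has
 * bit 63 set, so the signed cast alone distinguishes them.  Addresses
 * below are examples only.
 */
#if 0
        valid_user_address((void __user *)0x00007fffffffffffUL);       /* true  */
        valid_user_address((void __user *)0xffff800000000000UL);       /* false */
#endif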

/*
 * User pointers can have tag bits on x86-64.  This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user half of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that addresses around the sign change are not valid addresses,
 * and will GP-fault even with LAM enabled if the sign bit is set (see
 * "CR3.LAM_SUP" that can narrow the canonicality check if we ever
 * enable it, but not remove it entirely).
 *
 * So the "overflow into kernel addresses" does not imply some sudden
 * exact boundary at the sign bit, and we can allow a lot of slop on the
 * size check.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr', and even if the end might be in kernel space, we'll
 * hit the GP faults for non-canonical accesses before we ever get
 * there.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
        if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
                return valid_user_address(ptr);
        } else {
                unsigned long sum = size + (__force unsigned long)ptr;

                return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
        }
}
#define __access_ok __access_ok
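
/*
 * Illustrative sketch (not part of the original header): for a
 * non-constant size the check catches both an end that lands in the
 * kernel half and a sum that wraps past the top of the address space.
 * Values below are made up for demonstration only.
 */
#if 0
        __access_ok((void __user *)0x00007f0000000000UL, 0x1000);      /* true  */
        __access_ok((void __user *)0x00007f0000000000UL, 1UL << 63);   /* false: end in kernel half */
        __access_ok((void __user *)0x00007f0000000000UL, -1UL);        /* false: ptr + size wraps */
#endif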

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
        stac();
        /*
         * If CPU has FSRM feature, use 'rep movs'.
         * Otherwise, use rep_movs_alternative.
         */
        asm volatile(
                "1:\n\t"
                ALTERNATIVE("rep movsb",
                            "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
                "2:\n"
                _ASM_EXTABLE_UA(1b, 2b)
                :"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
                : : "memory", "rax");
        clac();
        return len;
}
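
/*
 * Illustrative sketch (not part of the original header): the asm above
 * keeps the 'rep movsb' register convention, so rep_movs_alternative()
 * also takes the count in %rcx, destination in %rdi and source in %rsi,
 * and leaves the number of bytes NOT copied in %rcx, which is what
 * copy_user_generic() returns (0 on full success).  'kbuf', 'ubuf' and
 * 'len' are hypothetical.
 */
#if 0
        unsigned long left = copy_user_generic(kbuf, (__force void *)ubuf, len);

        if (left)
                /* only 'len - left' bytes were copied before a fault */;
#endif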

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
        return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
        return copy_user_generic((__force void *)dst, src, size);
}
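
/*
 * Illustrative usage (not part of the original header): the generic
 * copy_from_user()/copy_to_user() in <linux/uaccess.h> wrap these raw
 * helpers with an access_ok() check; callers test the "bytes not
 * copied" return value.  'some_ioctl_req' and 'uarg' are hypothetical.
 */
#if 0
        struct some_ioctl_req req;

        if (copy_from_user(&req, uarg, sizeof(req)))
                return -EFAULT;
#endif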

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        long ret;
        kasan_check_write(dst, size);
        stac();
        ret = __copy_user_nocache(dst, src, size);
        clac();
        return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_flushcache(dst, src, size);
}
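
/*
 * Illustrative usage (not part of the original header): the "_inatomic"
 * variant is meant for contexts that cannot take a sleeping page fault,
 * so it is typically wrapped in pagefault_disable(); a non-zero return
 * means a short copy and the caller falls back to a faulting path.
 * 'dst', 'usrc' and 'len' are hypothetical.
 */
#if 0
        pagefault_disable();
        left = __copy_from_user_inatomic_nocache(dst, usrc, len);
        pagefault_enable();
        if (left)
                /* retry with a normal, faulting copy */;
#endif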

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
        might_fault();
        stac();

        /*
         * No memory constraint because it doesn't change any memory gcc
         * knows about.
         */
        asm volatile(
                "1:\n\t"
                ALTERNATIVE("rep stosb",
                            "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
                "2:\n"
                _ASM_EXTABLE_UA(1b, 2b)
                : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
                : "a" (0));

        clac();

        return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
        if (__access_ok(to, n))
                return __clear_user(to, n);
        return n;
}
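
/*
 * Illustrative usage (not part of the original header): clear_user()
 * returns the number of bytes that could not be zeroed, so 0 means
 * success.  'ubuf' and 'len' are hypothetical.
 */
#if 0
        if (clear_user(ubuf, len))
                return -EFAULT;
#endif
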
#endif /* _ASM_X86_UACCESS_64_H */
