1 // SPDX-License-Identifier: GPL-2.0-only 1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 2 /* 3 * fs/userfaultfd.c 3 * fs/userfaultfd.c 4 * 4 * 5 * Copyright (C) 2007 Davide Libenzi <davide 5 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> 6 * Copyright (C) 2008-2009 Red Hat, Inc. 6 * Copyright (C) 2008-2009 Red Hat, Inc. 7 * Copyright (C) 2015 Red Hat, Inc. 7 * Copyright (C) 2015 Red Hat, Inc. 8 * 8 * 9 * Some part derived from fs/eventfd.c (anon 9 * Some part derived from fs/eventfd.c (anon inode setup) and 10 * mm/ksm.c (mm hashing). 10 * mm/ksm.c (mm hashing). 11 */ 11 */ 12 12 13 #include <linux/list.h> 13 #include <linux/list.h> 14 #include <linux/hashtable.h> 14 #include <linux/hashtable.h> 15 #include <linux/sched/signal.h> 15 #include <linux/sched/signal.h> 16 #include <linux/sched/mm.h> 16 #include <linux/sched/mm.h> 17 #include <linux/mm.h> 17 #include <linux/mm.h> 18 #include <linux/mm_inline.h> 18 #include <linux/mm_inline.h> 19 #include <linux/mmu_notifier.h> 19 #include <linux/mmu_notifier.h> 20 #include <linux/poll.h> 20 #include <linux/poll.h> 21 #include <linux/slab.h> 21 #include <linux/slab.h> 22 #include <linux/seq_file.h> 22 #include <linux/seq_file.h> 23 #include <linux/file.h> 23 #include <linux/file.h> 24 #include <linux/bug.h> 24 #include <linux/bug.h> 25 #include <linux/anon_inodes.h> 25 #include <linux/anon_inodes.h> 26 #include <linux/syscalls.h> 26 #include <linux/syscalls.h> 27 #include <linux/userfaultfd_k.h> 27 #include <linux/userfaultfd_k.h> 28 #include <linux/mempolicy.h> 28 #include <linux/mempolicy.h> 29 #include <linux/ioctl.h> 29 #include <linux/ioctl.h> 30 #include <linux/security.h> 30 #include <linux/security.h> 31 #include <linux/hugetlb.h> 31 #include <linux/hugetlb.h> 32 #include <linux/swapops.h> 32 #include <linux/swapops.h> 33 #include <linux/miscdevice.h> << 34 #include <linux/uio.h> << 35 33 36 static int sysctl_unprivileged_userfaultfd __r !! 34 int sysctl_unprivileged_userfaultfd __read_mostly; 37 35 38 #ifdef CONFIG_SYSCTL !! 36 static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly; 39 static struct ctl_table vm_userfaultfd_table[] << 40 { << 41 .procname = "unprivilege << 42 .data = &sysctl_unpr << 43 .maxlen = sizeof(sysct << 44 .mode = 0644, << 45 .proc_handler = proc_dointve << 46 .extra1 = SYSCTL_ZERO, << 47 .extra2 = SYSCTL_ONE, << 48 }, << 49 }; << 50 #endif << 51 37 52 static struct kmem_cache *userfaultfd_ctx_cach !! 38 /* >> 39 * Start with fault_pending_wqh and fault_wqh so they're more likely >> 40 * to be in the same cacheline. >> 41 * >> 42 * Locking order: >> 43 * fd_wqh.lock >> 44 * fault_pending_wqh.lock >> 45 * fault_wqh.lock >> 46 * event_wqh.lock >> 47 * >> 48 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks, >> 49 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's >> 50 * also taken in IRQ context. >> 51 */ >> 52 struct userfaultfd_ctx { >> 53 /* waitqueue head for the pending (i.e. 
not read) userfaults */ >> 54 wait_queue_head_t fault_pending_wqh; >> 55 /* waitqueue head for the userfaults */ >> 56 wait_queue_head_t fault_wqh; >> 57 /* waitqueue head for the pseudo fd to wakeup poll/read */ >> 58 wait_queue_head_t fd_wqh; >> 59 /* waitqueue head for events */ >> 60 wait_queue_head_t event_wqh; >> 61 /* a refile sequence protected by fault_pending_wqh lock */ >> 62 seqcount_spinlock_t refile_seq; >> 63 /* pseudo fd refcounting */ >> 64 refcount_t refcount; >> 65 /* userfaultfd syscall flags */ >> 66 unsigned int flags; >> 67 /* features requested from the userspace */ >> 68 unsigned int features; >> 69 /* released */ >> 70 bool released; >> 71 /* memory mappings are changing because of non-cooperative event */ >> 72 atomic_t mmap_changing; >> 73 /* mm with one ore more vmas attached to this userfaultfd_ctx */ >> 74 struct mm_struct *mm; >> 75 }; 53 76 54 struct userfaultfd_fork_ctx { 77 struct userfaultfd_fork_ctx { 55 struct userfaultfd_ctx *orig; 78 struct userfaultfd_ctx *orig; 56 struct userfaultfd_ctx *new; 79 struct userfaultfd_ctx *new; 57 struct list_head list; 80 struct list_head list; 58 }; 81 }; 59 82 60 struct userfaultfd_unmap_ctx { 83 struct userfaultfd_unmap_ctx { 61 struct userfaultfd_ctx *ctx; 84 struct userfaultfd_ctx *ctx; 62 unsigned long start; 85 unsigned long start; 63 unsigned long end; 86 unsigned long end; 64 struct list_head list; 87 struct list_head list; 65 }; 88 }; 66 89 67 struct userfaultfd_wait_queue { 90 struct userfaultfd_wait_queue { 68 struct uffd_msg msg; 91 struct uffd_msg msg; 69 wait_queue_entry_t wq; 92 wait_queue_entry_t wq; 70 struct userfaultfd_ctx *ctx; 93 struct userfaultfd_ctx *ctx; 71 bool waken; 94 bool waken; 72 }; 95 }; 73 96 74 struct userfaultfd_wake_range { 97 struct userfaultfd_wake_range { 75 unsigned long start; 98 unsigned long start; 76 unsigned long len; 99 unsigned long len; 77 }; 100 }; 78 101 79 /* internal indication that UFFD_API ioctl was 102 /* internal indication that UFFD_API ioctl was successfully executed */ 80 #define UFFD_FEATURE_INITIALIZED 103 #define UFFD_FEATURE_INITIALIZED (1u << 31) 81 104 82 static bool userfaultfd_is_initialized(struct 105 static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx) 83 { 106 { 84 return ctx->features & UFFD_FEATURE_IN 107 return ctx->features & UFFD_FEATURE_INITIALIZED; 85 } 108 } 86 109 87 static bool userfaultfd_wp_async_ctx(struct us << 88 { << 89 return ctx && (ctx->features & UFFD_FE << 90 } << 91 << 92 /* << 93 * Whether WP_UNPOPULATED is enabled on the uf << 94 * meaningful when userfaultfd_wp()==true on t << 95 * anonymous. 
<< 96 */ << 97 bool userfaultfd_wp_unpopulated(struct vm_area << 98 { << 99 struct userfaultfd_ctx *ctx = vma->vm_ << 100 << 101 if (!ctx) << 102 return false; << 103 << 104 return ctx->features & UFFD_FEATURE_WP << 105 } << 106 << 107 static void userfaultfd_set_vm_flags(struct vm << 108 vm_flags_ << 109 { << 110 const bool uffd_wp_changed = (vma->vm_ << 111 << 112 vm_flags_reset(vma, flags); << 113 /* << 114 * For shared mappings, we want to ena << 115 * userfaultfd-wp is enabled (see vma_ << 116 * recalculate vma->vm_page_prot whene << 117 */ << 118 if ((vma->vm_flags & VM_SHARED) && uff << 119 vma_set_page_prot(vma); << 120 } << 121 << 122 static int userfaultfd_wake_function(wait_queu 110 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, 123 int wake_ 111 int wake_flags, void *key) 124 { 112 { 125 struct userfaultfd_wake_range *range = 113 struct userfaultfd_wake_range *range = key; 126 int ret; 114 int ret; 127 struct userfaultfd_wait_queue *uwq; 115 struct userfaultfd_wait_queue *uwq; 128 unsigned long start, len; 116 unsigned long start, len; 129 117 130 uwq = container_of(wq, struct userfaul 118 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); 131 ret = 0; 119 ret = 0; 132 /* len == 0 means wake all */ 120 /* len == 0 means wake all */ 133 start = range->start; 121 start = range->start; 134 len = range->len; 122 len = range->len; 135 if (len && (start > uwq->msg.arg.pagef 123 if (len && (start > uwq->msg.arg.pagefault.address || 136 start + len <= uwq->msg.ar 124 start + len <= uwq->msg.arg.pagefault.address)) 137 goto out; 125 goto out; 138 WRITE_ONCE(uwq->waken, true); 126 WRITE_ONCE(uwq->waken, true); 139 /* 127 /* 140 * The Program-Order guarantees provid 128 * The Program-Order guarantees provided by the scheduler 141 * ensure uwq->waken is visible before 129 * ensure uwq->waken is visible before the task is woken. 142 */ 130 */ 143 ret = wake_up_state(wq->private, mode) 131 ret = wake_up_state(wq->private, mode); 144 if (ret) { 132 if (ret) { 145 /* 133 /* 146 * Wake only once, autoremove 134 * Wake only once, autoremove behavior. 147 * 135 * 148 * After the effect of list_de 136 * After the effect of list_del_init is visible to the other 149 * CPUs, the waitqueue may dis 137 * CPUs, the waitqueue may disappear from under us, see the 150 * !list_empty_careful() in ha 138 * !list_empty_careful() in handle_userfault(). 151 * 139 * 152 * try_to_wake_up() has an imp 140 * try_to_wake_up() has an implicit smp_mb(), and the 153 * wq->private is read before 141 * wq->private is read before calling the extern function 154 * "wake_up_state" (which in t 142 * "wake_up_state" (which in turns calls try_to_wake_up). 155 */ 143 */ 156 list_del_init(&wq->entry); 144 list_del_init(&wq->entry); 157 } 145 } 158 out: 146 out: 159 return ret; 147 return ret; 160 } 148 } 161 149 162 /** 150 /** 163 * userfaultfd_ctx_get - Acquires a reference 151 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd 164 * context. 152 * context. 165 * @ctx: [in] Pointer to the userfaultfd conte 153 * @ctx: [in] Pointer to the userfaultfd context. 166 */ 154 */ 167 static void userfaultfd_ctx_get(struct userfau 155 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) 168 { 156 { 169 refcount_inc(&ctx->refcount); 157 refcount_inc(&ctx->refcount); 170 } 158 } 171 159 172 /** 160 /** 173 * userfaultfd_ctx_put - Releases a reference 161 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd 174 * context. 162 * context. 
175 * @ctx: [in] Pointer to userfaultfd context. 163 * @ctx: [in] Pointer to userfaultfd context. 176 * 164 * 177 * The userfaultfd context reference must have 165 * The userfaultfd context reference must have been previously acquired either 178 * with userfaultfd_ctx_get() or userfaultfd_c 166 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget(). 179 */ 167 */ 180 static void userfaultfd_ctx_put(struct userfau 168 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) 181 { 169 { 182 if (refcount_dec_and_test(&ctx->refcou 170 if (refcount_dec_and_test(&ctx->refcount)) { 183 VM_BUG_ON(spin_is_locked(&ctx- 171 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); 184 VM_BUG_ON(waitqueue_active(&ct 172 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); 185 VM_BUG_ON(spin_is_locked(&ctx- 173 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); 186 VM_BUG_ON(waitqueue_active(&ct 174 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh)); 187 VM_BUG_ON(spin_is_locked(&ctx- 175 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock)); 188 VM_BUG_ON(waitqueue_active(&ct 176 VM_BUG_ON(waitqueue_active(&ctx->event_wqh)); 189 VM_BUG_ON(spin_is_locked(&ctx- 177 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock)); 190 VM_BUG_ON(waitqueue_active(&ct 178 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh)); 191 mmdrop(ctx->mm); 179 mmdrop(ctx->mm); 192 kmem_cache_free(userfaultfd_ct 180 kmem_cache_free(userfaultfd_ctx_cachep, ctx); 193 } 181 } 194 } 182 } 195 183 196 static inline void msg_init(struct uffd_msg *m 184 static inline void msg_init(struct uffd_msg *msg) 197 { 185 { 198 BUILD_BUG_ON(sizeof(struct uffd_msg) ! 186 BUILD_BUG_ON(sizeof(struct uffd_msg) != 32); 199 /* 187 /* 200 * Must use memset to zero out the pad 188 * Must use memset to zero out the paddings or kernel data is 201 * leaked to userland. 189 * leaked to userland. 202 */ 190 */ 203 memset(msg, 0, sizeof(struct uffd_msg) 191 memset(msg, 0, sizeof(struct uffd_msg)); 204 } 192 } 205 193 206 static inline struct uffd_msg userfault_msg(un 194 static inline struct uffd_msg userfault_msg(unsigned long address, 207 un 195 unsigned long real_address, 208 un 196 unsigned int flags, 209 un 197 unsigned long reason, 210 un 198 unsigned int features) 211 { 199 { 212 struct uffd_msg msg; 200 struct uffd_msg msg; 213 201 214 msg_init(&msg); 202 msg_init(&msg); 215 msg.event = UFFD_EVENT_PAGEFAULT; 203 msg.event = UFFD_EVENT_PAGEFAULT; 216 204 217 msg.arg.pagefault.address = (features 205 msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ? 218 real_addre 206 real_address : address; 219 207 220 /* 208 /* 221 * These flags indicate why the userfa 209 * These flags indicate why the userfault occurred: 222 * - UFFD_PAGEFAULT_FLAG_WP indicates 210 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault. 223 * - UFFD_PAGEFAULT_FLAG_MINOR indicat 211 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault. 224 * - Neither of these flags being set 212 * - Neither of these flags being set indicates a MISSING fault. 225 * 213 * 226 * Separately, UFFD_PAGEFAULT_FLAG_WRI 214 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write 227 * fault. Otherwise, it was a read fau 215 * fault. Otherwise, it was a read fault. 
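	 * E.g. a write fault on a missing page carries only
	 * UFFD_PAGEFAULT_FLAG_WRITE, while a write fault on a
	 * write-protected page carries UFFD_PAGEFAULT_FLAG_WP together
	 * with UFFD_PAGEFAULT_FLAG_WRITE.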
228 */ 216 */ 229 if (flags & FAULT_FLAG_WRITE) 217 if (flags & FAULT_FLAG_WRITE) 230 msg.arg.pagefault.flags |= UFF 218 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE; 231 if (reason & VM_UFFD_WP) 219 if (reason & VM_UFFD_WP) 232 msg.arg.pagefault.flags |= UFF 220 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP; 233 if (reason & VM_UFFD_MINOR) 221 if (reason & VM_UFFD_MINOR) 234 msg.arg.pagefault.flags |= UFF 222 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR; 235 if (features & UFFD_FEATURE_THREAD_ID) 223 if (features & UFFD_FEATURE_THREAD_ID) 236 msg.arg.pagefault.feat.ptid = 224 msg.arg.pagefault.feat.ptid = task_pid_vnr(current); 237 return msg; 225 return msg; 238 } 226 } 239 227 240 #ifdef CONFIG_HUGETLB_PAGE 228 #ifdef CONFIG_HUGETLB_PAGE 241 /* 229 /* 242 * Same functionality as userfaultfd_must_wait 230 * Same functionality as userfaultfd_must_wait below with modifications for 243 * hugepmd ranges. 231 * hugepmd ranges. 244 */ 232 */ 245 static inline bool userfaultfd_huge_must_wait( 233 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, 246 !! 234 struct vm_area_struct *vma, 247 !! 235 unsigned long address, >> 236 unsigned long flags, >> 237 unsigned long reason) 248 { 238 { 249 struct vm_area_struct *vma = vmf->vma; !! 239 struct mm_struct *mm = ctx->mm; 250 pte_t *ptep, pte; 240 pte_t *ptep, pte; 251 bool ret = true; 241 bool ret = true; 252 242 253 assert_fault_locked(vmf); !! 243 mmap_assert_locked(mm); >> 244 >> 245 ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); 254 246 255 ptep = hugetlb_walk(vma, vmf->address, << 256 if (!ptep) 247 if (!ptep) 257 goto out; 248 goto out; 258 249 259 ret = false; 250 ret = false; 260 pte = huge_ptep_get(vma->vm_mm, vmf->a !! 251 pte = huge_ptep_get(ptep); 261 252 262 /* 253 /* 263 * Lockless access: we're in a wait_ev 254 * Lockless access: we're in a wait_event so it's ok if it 264 * changes under us. PTE markers shou 255 * changes under us. PTE markers should be handled the same as none 265 * ptes here. 256 * ptes here. 266 */ 257 */ 267 if (huge_pte_none_mostly(pte)) 258 if (huge_pte_none_mostly(pte)) 268 ret = true; 259 ret = true; 269 if (!huge_pte_write(pte) && (reason & 260 if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) 270 ret = true; 261 ret = true; 271 out: 262 out: 272 return ret; 263 return ret; 273 } 264 } 274 #else 265 #else 275 static inline bool userfaultfd_huge_must_wait( 266 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, 276 !! 267 struct vm_area_struct *vma, 277 !! 268 unsigned long address, >> 269 unsigned long flags, >> 270 unsigned long reason) 278 { 271 { 279 return false; /* should never get he 272 return false; /* should never get here */ 280 } 273 } 281 #endif /* CONFIG_HUGETLB_PAGE */ 274 #endif /* CONFIG_HUGETLB_PAGE */ 282 275 283 /* 276 /* 284 * Verify the pagetables are still not ok afte 277 * Verify the pagetables are still not ok after having reigstered into 285 * the fault_pending_wqh to avoid userland hav 278 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any 286 * userfault that has already been resolved, i !! 279 * userfault that has already been resolved, if userfaultfd_read and 287 * UFFDIO_COPY|ZEROPAGE are being run simultan 280 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different 288 * threads. 281 * threads. 289 */ 282 */ 290 static inline bool userfaultfd_must_wait(struc 283 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, 291 struc !! 
284 unsigned long address, >> 285 unsigned long flags, 292 unsig 286 unsigned long reason) 293 { 287 { 294 struct mm_struct *mm = ctx->mm; 288 struct mm_struct *mm = ctx->mm; 295 unsigned long address = vmf->address; << 296 pgd_t *pgd; 289 pgd_t *pgd; 297 p4d_t *p4d; 290 p4d_t *p4d; 298 pud_t *pud; 291 pud_t *pud; 299 pmd_t *pmd, _pmd; 292 pmd_t *pmd, _pmd; 300 pte_t *pte; 293 pte_t *pte; 301 pte_t ptent; << 302 bool ret = true; 294 bool ret = true; 303 295 304 assert_fault_locked(vmf); !! 296 mmap_assert_locked(mm); 305 297 306 pgd = pgd_offset(mm, address); 298 pgd = pgd_offset(mm, address); 307 if (!pgd_present(*pgd)) 299 if (!pgd_present(*pgd)) 308 goto out; 300 goto out; 309 p4d = p4d_offset(pgd, address); 301 p4d = p4d_offset(pgd, address); 310 if (!p4d_present(*p4d)) 302 if (!p4d_present(*p4d)) 311 goto out; 303 goto out; 312 pud = pud_offset(p4d, address); 304 pud = pud_offset(p4d, address); 313 if (!pud_present(*pud)) 305 if (!pud_present(*pud)) 314 goto out; 306 goto out; 315 pmd = pmd_offset(pud, address); 307 pmd = pmd_offset(pud, address); 316 again: !! 308 /* 317 _pmd = pmdp_get_lockless(pmd); !! 309 * READ_ONCE must function as a barrier with narrower scope >> 310 * and it must be equivalent to: >> 311 * _pmd = *pmd; barrier(); >> 312 * >> 313 * This is to deal with the instability (as in >> 314 * pmd_trans_unstable) of the pmd. >> 315 */ >> 316 _pmd = READ_ONCE(*pmd); 318 if (pmd_none(_pmd)) 317 if (pmd_none(_pmd)) 319 goto out; 318 goto out; 320 319 321 ret = false; 320 ret = false; 322 if (!pmd_present(_pmd) || pmd_devmap(_ !! 321 if (!pmd_present(_pmd)) 323 goto out; 322 goto out; 324 323 325 if (pmd_trans_huge(_pmd)) { 324 if (pmd_trans_huge(_pmd)) { 326 if (!pmd_write(_pmd) && (reaso 325 if (!pmd_write(_pmd) && (reason & VM_UFFD_WP)) 327 ret = true; 326 ret = true; 328 goto out; 327 goto out; 329 } 328 } 330 329 >> 330 /* >> 331 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it >> 332 * and use the standard pte_offset_map() instead of parsing _pmd. >> 333 */ 331 pte = pte_offset_map(pmd, address); 334 pte = pte_offset_map(pmd, address); 332 if (!pte) { << 333 ret = true; << 334 goto again; << 335 } << 336 /* 335 /* 337 * Lockless access: we're in a wait_ev 336 * Lockless access: we're in a wait_event so it's ok if it 338 * changes under us. PTE markers shou 337 * changes under us. PTE markers should be handled the same as none 339 * ptes here. 338 * ptes here. 340 */ 339 */ 341 ptent = ptep_get(pte); !! 340 if (pte_none_mostly(*pte)) 342 if (pte_none_mostly(ptent)) << 343 ret = true; 341 ret = true; 344 if (!pte_write(ptent) && (reason & VM_ !! 
342 if (!pte_write(*pte) && (reason & VM_UFFD_WP)) 345 ret = true; 343 ret = true; 346 pte_unmap(pte); 344 pte_unmap(pte); 347 345 348 out: 346 out: 349 return ret; 347 return ret; 350 } 348 } 351 349 352 static inline unsigned int userfaultfd_get_blo 350 static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags) 353 { 351 { 354 if (flags & FAULT_FLAG_INTERRUPTIBLE) 352 if (flags & FAULT_FLAG_INTERRUPTIBLE) 355 return TASK_INTERRUPTIBLE; 353 return TASK_INTERRUPTIBLE; 356 354 357 if (flags & FAULT_FLAG_KILLABLE) 355 if (flags & FAULT_FLAG_KILLABLE) 358 return TASK_KILLABLE; 356 return TASK_KILLABLE; 359 357 360 return TASK_UNINTERRUPTIBLE; 358 return TASK_UNINTERRUPTIBLE; 361 } 359 } 362 360 363 /* 361 /* 364 * The locking rules involved in returning VM_ 362 * The locking rules involved in returning VM_FAULT_RETRY depending on 365 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NO 363 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and 366 * FAULT_FLAG_KILLABLE are not straightforward 364 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution" 367 * recommendation in __lock_page_or_retry is n 365 * recommendation in __lock_page_or_retry is not an understatement. 368 * 366 * 369 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_ 367 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released 370 * before returning VM_FAULT_RETRY only if FAU 368 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is 371 * not set. 369 * not set. 372 * 370 * 373 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_ 371 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not 374 * set, VM_FAULT_RETRY can still be returned i 372 * set, VM_FAULT_RETRY can still be returned if and only if there are 375 * fatal_signal_pending()s, and the mmap_lock 373 * fatal_signal_pending()s, and the mmap_lock must be released before 376 * returning it. 374 * returning it. 377 */ 375 */ 378 vm_fault_t handle_userfault(struct vm_fault *v 376 vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) 379 { 377 { 380 struct vm_area_struct *vma = vmf->vma; !! 378 struct mm_struct *mm = vmf->vma->vm_mm; 381 struct mm_struct *mm = vma->vm_mm; << 382 struct userfaultfd_ctx *ctx; 379 struct userfaultfd_ctx *ctx; 383 struct userfaultfd_wait_queue uwq; 380 struct userfaultfd_wait_queue uwq; 384 vm_fault_t ret = VM_FAULT_SIGBUS; 381 vm_fault_t ret = VM_FAULT_SIGBUS; 385 bool must_wait; 382 bool must_wait; 386 unsigned int blocking_state; 383 unsigned int blocking_state; 387 384 388 /* 385 /* 389 * We don't do userfault handling for 386 * We don't do userfault handling for the final child pid update. 390 * 387 * 391 * We also don't do userfault handling 388 * We also don't do userfault handling during 392 * coredumping. hugetlbfs has the spec 389 * coredumping. hugetlbfs has the special 393 * hugetlb_follow_page_mask() to skip !! 390 * follow_hugetlb_page() to skip missing pages in the 394 * FOLL_DUMP case, anon memory also ch 391 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with 395 * the no_page_table() helper in follo 392 * the no_page_table() helper in follow_page_mask(), but the 396 * shmem_vm_ops->fault method is invok 393 * shmem_vm_ops->fault method is invoked even during 397 * coredumping and it ends up here. !! 394 * coredumping without mmap_lock and it ends up here. 398 */ 395 */ 399 if (current->flags & (PF_EXITING|PF_DU 396 if (current->flags & (PF_EXITING|PF_DUMPCORE)) 400 goto out; 397 goto out; 401 398 402 assert_fault_locked(vmf); !! 
399 /* >> 400 * Coredumping runs without mmap_lock so we can only check that >> 401 * the mmap_lock is held, if PF_DUMPCORE was not set. >> 402 */ >> 403 mmap_assert_locked(mm); 403 404 404 ctx = vma->vm_userfaultfd_ctx.ctx; !! 405 ctx = vmf->vma->vm_userfaultfd_ctx.ctx; 405 if (!ctx) 406 if (!ctx) 406 goto out; 407 goto out; 407 408 408 BUG_ON(ctx->mm != mm); 409 BUG_ON(ctx->mm != mm); 409 410 410 /* Any unrecognized flag is a bug. */ 411 /* Any unrecognized flag is a bug. */ 411 VM_BUG_ON(reason & ~__VM_UFFD_FLAGS); 412 VM_BUG_ON(reason & ~__VM_UFFD_FLAGS); 412 /* 0 or > 1 flags set is a bug; we exp 413 /* 0 or > 1 flags set is a bug; we expect exactly 1. */ 413 VM_BUG_ON(!reason || (reason & (reason 414 VM_BUG_ON(!reason || (reason & (reason - 1))); 414 415 415 if (ctx->features & UFFD_FEATURE_SIGBU 416 if (ctx->features & UFFD_FEATURE_SIGBUS) 416 goto out; 417 goto out; 417 if (!(vmf->flags & FAULT_FLAG_USER) && !! 418 if ((vmf->flags & FAULT_FLAG_USER) == 0 && >> 419 ctx->flags & UFFD_USER_MODE_ONLY) { >> 420 printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd " >> 421 "sysctl knob to 1 if kernel faults must be handled " >> 422 "without obtaining CAP_SYS_PTRACE capability\n"); 418 goto out; 423 goto out; >> 424 } 419 425 420 /* 426 /* 421 * If it's already released don't get 427 * If it's already released don't get it. This avoids to loop 422 * in __get_user_pages if userfaultfd_ 428 * in __get_user_pages if userfaultfd_release waits on the 423 * caller of handle_userfault to relea 429 * caller of handle_userfault to release the mmap_lock. 424 */ 430 */ 425 if (unlikely(READ_ONCE(ctx->released)) 431 if (unlikely(READ_ONCE(ctx->released))) { 426 /* 432 /* 427 * Don't return VM_FAULT_SIGBU 433 * Don't return VM_FAULT_SIGBUS in this case, so a non 428 * cooperative manager can clo 434 * cooperative manager can close the uffd after the 429 * last UFFDIO_COPY, without r 435 * last UFFDIO_COPY, without risking to trigger an 430 * involuntary SIGBUS if the p 436 * involuntary SIGBUS if the process was starting the 431 * userfaultfd while the userf 437 * userfaultfd while the userfaultfd was still armed 432 * (but after the last UFFDIO_ 438 * (but after the last UFFDIO_COPY). If the uffd 433 * wasn't already closed when 439 * wasn't already closed when the userfault reached 434 * this point, that would norm 440 * this point, that would normally be solved by 435 * userfaultfd_must_wait retur 441 * userfaultfd_must_wait returning 'false'. 436 * 442 * 437 * If we were to return VM_FAU 443 * If we were to return VM_FAULT_SIGBUS here, the non 438 * cooperative manager would b 444 * cooperative manager would be instead forced to 439 * always call UFFDIO_UNREGIST 445 * always call UFFDIO_UNREGISTER before it can safely 440 * close the uffd. 446 * close the uffd. 441 */ 447 */ 442 ret = VM_FAULT_NOPAGE; 448 ret = VM_FAULT_NOPAGE; 443 goto out; 449 goto out; 444 } 450 } 445 451 446 /* 452 /* 447 * Check that we can return VM_FAULT_R 453 * Check that we can return VM_FAULT_RETRY. 
448 * 454 * 449 * NOTE: it should become possible to 455 * NOTE: it should become possible to return VM_FAULT_RETRY 450 * even if FAULT_FLAG_TRIED is set wit 456 * even if FAULT_FLAG_TRIED is set without leading to gup() 451 * -EBUSY failures, if the userfaultfd 457 * -EBUSY failures, if the userfaultfd is to be extended for 452 * VM_UFFD_WP tracking and we intend t 458 * VM_UFFD_WP tracking and we intend to arm the userfault 453 * without first stopping userland acc 459 * without first stopping userland access to the memory. For 454 * VM_UFFD_MISSING userfaults this is 460 * VM_UFFD_MISSING userfaults this is enough for now. 455 */ 461 */ 456 if (unlikely(!(vmf->flags & FAULT_FLAG 462 if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) { 457 /* 463 /* 458 * Validate the invariant that 464 * Validate the invariant that nowait must allow retry 459 * to be sure not to return SI 465 * to be sure not to return SIGBUS erroneously on 460 * nowait invocations. 466 * nowait invocations. 461 */ 467 */ 462 BUG_ON(vmf->flags & FAULT_FLAG 468 BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT); 463 #ifdef CONFIG_DEBUG_VM 469 #ifdef CONFIG_DEBUG_VM 464 if (printk_ratelimit()) { 470 if (printk_ratelimit()) { 465 printk(KERN_WARNING 471 printk(KERN_WARNING 466 "FAULT_FLAG_ALL 472 "FAULT_FLAG_ALLOW_RETRY missing %x\n", 467 vmf->flags); 473 vmf->flags); 468 dump_stack(); 474 dump_stack(); 469 } 475 } 470 #endif 476 #endif 471 goto out; 477 goto out; 472 } 478 } 473 479 474 /* 480 /* 475 * Handle nowait, not much to do other 481 * Handle nowait, not much to do other than tell it to retry 476 * and wait. 482 * and wait. 477 */ 483 */ 478 ret = VM_FAULT_RETRY; 484 ret = VM_FAULT_RETRY; 479 if (vmf->flags & FAULT_FLAG_RETRY_NOWA 485 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 480 goto out; 486 goto out; 481 487 482 /* take the reference before dropping 488 /* take the reference before dropping the mmap_lock */ 483 userfaultfd_ctx_get(ctx); 489 userfaultfd_ctx_get(ctx); 484 490 485 init_waitqueue_func_entry(&uwq.wq, use 491 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); 486 uwq.wq.private = current; 492 uwq.wq.private = current; 487 uwq.msg = userfault_msg(vmf->address, 493 uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags, 488 reason, ctx->f 494 reason, ctx->features); 489 uwq.ctx = ctx; 495 uwq.ctx = ctx; 490 uwq.waken = false; 496 uwq.waken = false; 491 497 492 blocking_state = userfaultfd_get_block 498 blocking_state = userfaultfd_get_blocking_state(vmf->flags); 493 499 494 /* << 495 * Take the vma lock now, in order to << 496 * userfaultfd_huge_must_wait() later. << 497 * (sleepable) vma lock can modify the << 498 * must be before explicitly calling s << 499 */ << 500 if (is_vm_hugetlb_page(vma)) << 501 hugetlb_vma_lock_read(vma); << 502 << 503 spin_lock_irq(&ctx->fault_pending_wqh. 500 spin_lock_irq(&ctx->fault_pending_wqh.lock); 504 /* 501 /* 505 * After the __add_wait_queue the uwq 502 * After the __add_wait_queue the uwq is visible to userland 506 * through poll/read(). 503 * through poll/read(). 507 */ 504 */ 508 __add_wait_queue(&ctx->fault_pending_w 505 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); 509 /* 506 /* 510 * The smp_mb() after __set_current_st 507 * The smp_mb() after __set_current_state prevents the reads 511 * following the spin_unlock to happen 508 * following the spin_unlock to happen before the list_add in 512 * __add_wait_queue. 509 * __add_wait_queue. 
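	 * Illustrative race this barrier closes: if the
	 * userfaultfd_must_wait() re-check below were reordered before
	 * the list_add, a concurrent UFFDIO_COPY could resolve the
	 * fault and scan a still-empty waitqueue in between, and this
	 * thread would then sleep on a userfault that was already
	 * resolved.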
513 */ 510 */ 514 set_current_state(blocking_state); 511 set_current_state(blocking_state); 515 spin_unlock_irq(&ctx->fault_pending_wq 512 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 516 513 517 if (!is_vm_hugetlb_page(vma)) !! 514 if (!is_vm_hugetlb_page(vmf->vma)) 518 must_wait = userfaultfd_must_w !! 515 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, >> 516 reason); 519 else 517 else 520 must_wait = userfaultfd_huge_m !! 518 must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma, 521 if (is_vm_hugetlb_page(vma)) !! 519 vmf->address, 522 hugetlb_vma_unlock_read(vma); !! 520 vmf->flags, reason); 523 release_fault_lock(vmf); !! 521 mmap_read_unlock(mm); 524 522 525 if (likely(must_wait && !READ_ONCE(ctx 523 if (likely(must_wait && !READ_ONCE(ctx->released))) { 526 wake_up_poll(&ctx->fd_wqh, EPO 524 wake_up_poll(&ctx->fd_wqh, EPOLLIN); 527 schedule(); 525 schedule(); 528 } 526 } 529 527 530 __set_current_state(TASK_RUNNING); 528 __set_current_state(TASK_RUNNING); 531 529 532 /* 530 /* 533 * Here we race with the list_del; lis 531 * Here we race with the list_del; list_add in 534 * userfaultfd_ctx_read(), however bec 532 * userfaultfd_ctx_read(), however because we don't ever run 535 * list_del_init() to refile across th 533 * list_del_init() to refile across the two lists, the prev 536 * and next pointers will never point 534 * and next pointers will never point to self. list_add also 537 * would never let any of the two poin 535 * would never let any of the two pointers to point to 538 * self. So list_empty_careful won't r 536 * self. So list_empty_careful won't risk to see both pointers 539 * pointing to self at any time during 537 * pointing to self at any time during the list refile. The 540 * only case where list_del_init() is 538 * only case where list_del_init() is called is the full 541 * removal in the wake function and th 539 * removal in the wake function and there we don't re-list_add 542 * and it's fine not to block on the s 540 * and it's fine not to block on the spinlock. The uwq on this 543 * kernel stack can be released after 541 * kernel stack can be released after the list_del_init. 544 */ 542 */ 545 if (!list_empty_careful(&uwq.wq.entry) 543 if (!list_empty_careful(&uwq.wq.entry)) { 546 spin_lock_irq(&ctx->fault_pend 544 spin_lock_irq(&ctx->fault_pending_wqh.lock); 547 /* 545 /* 548 * No need of list_del_init(), 546 * No need of list_del_init(), the uwq on the stack 549 * will be freed shortly anywa 547 * will be freed shortly anyway. 550 */ 548 */ 551 list_del(&uwq.wq.entry); 549 list_del(&uwq.wq.entry); 552 spin_unlock_irq(&ctx->fault_pe 550 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 553 } 551 } 554 552 555 /* 553 /* 556 * ctx may go away after this if the u 554 * ctx may go away after this if the userfault pseudo fd is 557 * already released. 555 * already released. 
558 */ 556 */ 559 userfaultfd_ctx_put(ctx); 557 userfaultfd_ctx_put(ctx); 560 558 561 out: 559 out: 562 return ret; 560 return ret; 563 } 561 } 564 562 565 static void userfaultfd_event_wait_completion( 563 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, 566 564 struct userfaultfd_wait_queue *ewq) 567 { 565 { 568 struct userfaultfd_ctx *release_new_ct 566 struct userfaultfd_ctx *release_new_ctx; 569 567 570 if (WARN_ON_ONCE(current->flags & PF_E 568 if (WARN_ON_ONCE(current->flags & PF_EXITING)) 571 goto out; 569 goto out; 572 570 573 ewq->ctx = ctx; 571 ewq->ctx = ctx; 574 init_waitqueue_entry(&ewq->wq, current 572 init_waitqueue_entry(&ewq->wq, current); 575 release_new_ctx = NULL; 573 release_new_ctx = NULL; 576 574 577 spin_lock_irq(&ctx->event_wqh.lock); 575 spin_lock_irq(&ctx->event_wqh.lock); 578 /* 576 /* 579 * After the __add_wait_queue the uwq 577 * After the __add_wait_queue the uwq is visible to userland 580 * through poll/read(). 578 * through poll/read(). 581 */ 579 */ 582 __add_wait_queue(&ctx->event_wqh, &ewq 580 __add_wait_queue(&ctx->event_wqh, &ewq->wq); 583 for (;;) { 581 for (;;) { 584 set_current_state(TASK_KILLABL 582 set_current_state(TASK_KILLABLE); 585 if (ewq->msg.event == 0) 583 if (ewq->msg.event == 0) 586 break; 584 break; 587 if (READ_ONCE(ctx->released) | 585 if (READ_ONCE(ctx->released) || 588 fatal_signal_pending(curre 586 fatal_signal_pending(current)) { 589 /* 587 /* 590 * &ewq->wq may be que 588 * &ewq->wq may be queued in fork_event, but 591 * __remove_wait_queue 589 * __remove_wait_queue ignores the head 592 * parameter. It would 590 * parameter. It would be a problem if it 593 * didn't. 591 * didn't. 594 */ 592 */ 595 __remove_wait_queue(&c 593 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 596 if (ewq->msg.event == 594 if (ewq->msg.event == UFFD_EVENT_FORK) { 597 struct userfau 595 struct userfaultfd_ctx *new; 598 596 599 new = (struct 597 new = (struct userfaultfd_ctx *) 600 (unsig 598 (unsigned long) 601 ewq->m 599 ewq->msg.arg.reserved.reserved1; 602 release_new_ct 600 release_new_ctx = new; 603 } 601 } 604 break; 602 break; 605 } 603 } 606 604 607 spin_unlock_irq(&ctx->event_wq 605 spin_unlock_irq(&ctx->event_wqh.lock); 608 606 609 wake_up_poll(&ctx->fd_wqh, EPO 607 wake_up_poll(&ctx->fd_wqh, EPOLLIN); 610 schedule(); 608 schedule(); 611 609 612 spin_lock_irq(&ctx->event_wqh. 610 spin_lock_irq(&ctx->event_wqh.lock); 613 } 611 } 614 __set_current_state(TASK_RUNNING); 612 __set_current_state(TASK_RUNNING); 615 spin_unlock_irq(&ctx->event_wqh.lock); 613 spin_unlock_irq(&ctx->event_wqh.lock); 616 614 617 if (release_new_ctx) { 615 if (release_new_ctx) { 618 struct vm_area_struct *vma; 616 struct vm_area_struct *vma; 619 struct mm_struct *mm = release 617 struct mm_struct *mm = release_new_ctx->mm; 620 VMA_ITERATOR(vmi, mm, 0); << 621 618 622 /* the various vma->vm_userfau 619 /* the various vma->vm_userfaultfd_ctx still points to it */ 623 mmap_write_lock(mm); 620 mmap_write_lock(mm); 624 for_each_vma(vmi, vma) { !! 621 for (vma = mm->mmap; vma; vma = vma->vm_next) 625 if (vma->vm_userfaultf 622 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { 626 vma_start_writ << 627 vma->vm_userfa 623 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 628 userfaultfd_se !! 
624 vma->vm_flags &= ~__VM_UFFD_FLAGS; 629 << 630 } 625 } 631 } << 632 mmap_write_unlock(mm); 626 mmap_write_unlock(mm); 633 627 634 userfaultfd_ctx_put(release_ne 628 userfaultfd_ctx_put(release_new_ctx); 635 } 629 } 636 630 637 /* 631 /* 638 * ctx may go away after this if the u 632 * ctx may go away after this if the userfault pseudo fd is 639 * already released. 633 * already released. 640 */ 634 */ 641 out: 635 out: 642 atomic_dec(&ctx->mmap_changing); 636 atomic_dec(&ctx->mmap_changing); 643 VM_BUG_ON(atomic_read(&ctx->mmap_chang 637 VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0); 644 userfaultfd_ctx_put(ctx); 638 userfaultfd_ctx_put(ctx); 645 } 639 } 646 640 647 static void userfaultfd_event_complete(struct 641 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, 648 struct 642 struct userfaultfd_wait_queue *ewq) 649 { 643 { 650 ewq->msg.event = 0; 644 ewq->msg.event = 0; 651 wake_up_locked(&ctx->event_wqh); 645 wake_up_locked(&ctx->event_wqh); 652 __remove_wait_queue(&ctx->event_wqh, & 646 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 653 } 647 } 654 648 655 int dup_userfaultfd(struct vm_area_struct *vma 649 int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) 656 { 650 { 657 struct userfaultfd_ctx *ctx = NULL, *o 651 struct userfaultfd_ctx *ctx = NULL, *octx; 658 struct userfaultfd_fork_ctx *fctx; 652 struct userfaultfd_fork_ctx *fctx; 659 653 660 octx = vma->vm_userfaultfd_ctx.ctx; 654 octx = vma->vm_userfaultfd_ctx.ctx; 661 if (!octx) !! 655 if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { 662 return 0; << 663 << 664 if (!(octx->features & UFFD_FEATURE_EV << 665 vma_start_write(vma); << 666 vma->vm_userfaultfd_ctx = NULL 656 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 667 userfaultfd_set_vm_flags(vma, !! 
657 vma->vm_flags &= ~__VM_UFFD_FLAGS; 668 return 0; 658 return 0; 669 } 659 } 670 660 671 list_for_each_entry(fctx, fcs, list) 661 list_for_each_entry(fctx, fcs, list) 672 if (fctx->orig == octx) { 662 if (fctx->orig == octx) { 673 ctx = fctx->new; 663 ctx = fctx->new; 674 break; 664 break; 675 } 665 } 676 666 677 if (!ctx) { 667 if (!ctx) { 678 fctx = kmalloc(sizeof(*fctx), 668 fctx = kmalloc(sizeof(*fctx), GFP_KERNEL); 679 if (!fctx) 669 if (!fctx) 680 return -ENOMEM; 670 return -ENOMEM; 681 671 682 ctx = kmem_cache_alloc(userfau 672 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); 683 if (!ctx) { 673 if (!ctx) { 684 kfree(fctx); 674 kfree(fctx); 685 return -ENOMEM; 675 return -ENOMEM; 686 } 676 } 687 677 688 refcount_set(&ctx->refcount, 1 678 refcount_set(&ctx->refcount, 1); 689 ctx->flags = octx->flags; 679 ctx->flags = octx->flags; 690 ctx->features = octx->features 680 ctx->features = octx->features; 691 ctx->released = false; 681 ctx->released = false; 692 init_rwsem(&ctx->map_changing_ << 693 atomic_set(&ctx->mmap_changing 682 atomic_set(&ctx->mmap_changing, 0); 694 ctx->mm = vma->vm_mm; 683 ctx->mm = vma->vm_mm; 695 mmgrab(ctx->mm); 684 mmgrab(ctx->mm); 696 685 697 userfaultfd_ctx_get(octx); 686 userfaultfd_ctx_get(octx); 698 down_write(&octx->map_changing << 699 atomic_inc(&octx->mmap_changin 687 atomic_inc(&octx->mmap_changing); 700 up_write(&octx->map_changing_l << 701 fctx->orig = octx; 688 fctx->orig = octx; 702 fctx->new = ctx; 689 fctx->new = ctx; 703 list_add_tail(&fctx->list, fcs 690 list_add_tail(&fctx->list, fcs); 704 } 691 } 705 692 706 vma->vm_userfaultfd_ctx.ctx = ctx; 693 vma->vm_userfaultfd_ctx.ctx = ctx; 707 return 0; 694 return 0; 708 } 695 } 709 696 710 static void dup_fctx(struct userfaultfd_fork_c 697 static void dup_fctx(struct userfaultfd_fork_ctx *fctx) 711 { 698 { 712 struct userfaultfd_ctx *ctx = fctx->or 699 struct userfaultfd_ctx *ctx = fctx->orig; 713 struct userfaultfd_wait_queue ewq; 700 struct userfaultfd_wait_queue ewq; 714 701 715 msg_init(&ewq.msg); 702 msg_init(&ewq.msg); 716 703 717 ewq.msg.event = UFFD_EVENT_FORK; 704 ewq.msg.event = UFFD_EVENT_FORK; 718 ewq.msg.arg.reserved.reserved1 = (unsi 705 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; 719 706 720 userfaultfd_event_wait_completion(ctx, 707 userfaultfd_event_wait_completion(ctx, &ewq); 721 } 708 } 722 709 723 void dup_userfaultfd_complete(struct list_head 710 void dup_userfaultfd_complete(struct list_head *fcs) 724 { 711 { 725 struct userfaultfd_fork_ctx *fctx, *n; 712 struct userfaultfd_fork_ctx *fctx, *n; 726 713 727 list_for_each_entry_safe(fctx, n, fcs, 714 list_for_each_entry_safe(fctx, n, fcs, list) { 728 dup_fctx(fctx); 715 dup_fctx(fctx); 729 list_del(&fctx->list); 716 list_del(&fctx->list); 730 kfree(fctx); 717 kfree(fctx); 731 } 718 } 732 } 719 } 733 720 734 void mremap_userfaultfd_prep(struct vm_area_st 721 void mremap_userfaultfd_prep(struct vm_area_struct *vma, 735 struct vm_userfau 722 struct vm_userfaultfd_ctx *vm_ctx) 736 { 723 { 737 struct userfaultfd_ctx *ctx; 724 struct userfaultfd_ctx *ctx; 738 725 739 ctx = vma->vm_userfaultfd_ctx.ctx; 726 ctx = vma->vm_userfaultfd_ctx.ctx; 740 727 741 if (!ctx) 728 if (!ctx) 742 return; 729 return; 743 730 744 if (ctx->features & UFFD_FEATURE_EVENT 731 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) { 745 vm_ctx->ctx = ctx; 732 vm_ctx->ctx = ctx; 746 userfaultfd_ctx_get(ctx); 733 userfaultfd_ctx_get(ctx); 747 down_write(&ctx->map_changing_ << 748 atomic_inc(&ctx->mmap_changing 734 
atomic_inc(&ctx->mmap_changing); 749 up_write(&ctx->map_changing_lo << 750 } else { 735 } else { 751 /* Drop uffd context if remap 736 /* Drop uffd context if remap feature not enabled */ 752 vma_start_write(vma); << 753 vma->vm_userfaultfd_ctx = NULL 737 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 754 userfaultfd_set_vm_flags(vma, !! 738 vma->vm_flags &= ~__VM_UFFD_FLAGS; 755 } 739 } 756 } 740 } 757 741 758 void mremap_userfaultfd_complete(struct vm_use 742 void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, 759 unsigned long 743 unsigned long from, unsigned long to, 760 unsigned long 744 unsigned long len) 761 { 745 { 762 struct userfaultfd_ctx *ctx = vm_ctx-> 746 struct userfaultfd_ctx *ctx = vm_ctx->ctx; 763 struct userfaultfd_wait_queue ewq; 747 struct userfaultfd_wait_queue ewq; 764 748 765 if (!ctx) 749 if (!ctx) 766 return; 750 return; 767 751 768 if (to & ~PAGE_MASK) { 752 if (to & ~PAGE_MASK) { 769 userfaultfd_ctx_put(ctx); 753 userfaultfd_ctx_put(ctx); 770 return; 754 return; 771 } 755 } 772 756 773 msg_init(&ewq.msg); 757 msg_init(&ewq.msg); 774 758 775 ewq.msg.event = UFFD_EVENT_REMAP; 759 ewq.msg.event = UFFD_EVENT_REMAP; 776 ewq.msg.arg.remap.from = from; 760 ewq.msg.arg.remap.from = from; 777 ewq.msg.arg.remap.to = to; 761 ewq.msg.arg.remap.to = to; 778 ewq.msg.arg.remap.len = len; 762 ewq.msg.arg.remap.len = len; 779 763 780 userfaultfd_event_wait_completion(ctx, 764 userfaultfd_event_wait_completion(ctx, &ewq); 781 } 765 } 782 766 783 bool userfaultfd_remove(struct vm_area_struct 767 bool userfaultfd_remove(struct vm_area_struct *vma, 784 unsigned long start, u 768 unsigned long start, unsigned long end) 785 { 769 { 786 struct mm_struct *mm = vma->vm_mm; 770 struct mm_struct *mm = vma->vm_mm; 787 struct userfaultfd_ctx *ctx; 771 struct userfaultfd_ctx *ctx; 788 struct userfaultfd_wait_queue ewq; 772 struct userfaultfd_wait_queue ewq; 789 773 790 ctx = vma->vm_userfaultfd_ctx.ctx; 774 ctx = vma->vm_userfaultfd_ctx.ctx; 791 if (!ctx || !(ctx->features & UFFD_FEA 775 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) 792 return true; 776 return true; 793 777 794 userfaultfd_ctx_get(ctx); 778 userfaultfd_ctx_get(ctx); 795 down_write(&ctx->map_changing_lock); << 796 atomic_inc(&ctx->mmap_changing); 779 atomic_inc(&ctx->mmap_changing); 797 up_write(&ctx->map_changing_lock); << 798 mmap_read_unlock(mm); 780 mmap_read_unlock(mm); 799 781 800 msg_init(&ewq.msg); 782 msg_init(&ewq.msg); 801 783 802 ewq.msg.event = UFFD_EVENT_REMOVE; 784 ewq.msg.event = UFFD_EVENT_REMOVE; 803 ewq.msg.arg.remove.start = start; 785 ewq.msg.arg.remove.start = start; 804 ewq.msg.arg.remove.end = end; 786 ewq.msg.arg.remove.end = end; 805 787 806 userfaultfd_event_wait_completion(ctx, 788 userfaultfd_event_wait_completion(ctx, &ewq); 807 789 808 return false; 790 return false; 809 } 791 } 810 792 811 static bool has_unmap_ctx(struct userfaultfd_c 793 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, 812 unsigned long start, 794 unsigned long start, unsigned long end) 813 { 795 { 814 struct userfaultfd_unmap_ctx *unmap_ct 796 struct userfaultfd_unmap_ctx *unmap_ctx; 815 797 816 list_for_each_entry(unmap_ctx, unmaps, 798 list_for_each_entry(unmap_ctx, unmaps, list) 817 if (unmap_ctx->ctx == ctx && u 799 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && 818 unmap_ctx->end == end) 800 unmap_ctx->end == end) 819 return true; 801 return true; 820 802 821 return false; 803 return false; 822 } 804 } 823 805 824 int userfaultfd_unmap_prep(struct 
vm_area_stru !! 806 int userfaultfd_unmap_prep(struct vm_area_struct *vma, 825 unsigned long end, !! 807 unsigned long start, unsigned long end, 826 { !! 808 struct list_head *unmaps) 827 struct userfaultfd_unmap_ctx *unmap_ct !! 809 { 828 struct userfaultfd_ctx *ctx = vma->vm_ !! 810 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) { >> 811 struct userfaultfd_unmap_ctx *unmap_ctx; >> 812 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; 829 813 830 if (!ctx || !(ctx->features & UFFD_FEA !! 814 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || 831 has_unmap_ctx(ctx, unmaps, start, !! 815 has_unmap_ctx(ctx, unmaps, start, end)) 832 return 0; !! 816 continue; 833 817 834 unmap_ctx = kzalloc(sizeof(*unmap_ctx) !! 818 unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); 835 if (!unmap_ctx) !! 819 if (!unmap_ctx) 836 return -ENOMEM; !! 820 return -ENOMEM; 837 821 838 userfaultfd_ctx_get(ctx); !! 822 userfaultfd_ctx_get(ctx); 839 down_write(&ctx->map_changing_lock); !! 823 atomic_inc(&ctx->mmap_changing); 840 atomic_inc(&ctx->mmap_changing); !! 824 unmap_ctx->ctx = ctx; 841 up_write(&ctx->map_changing_lock); !! 825 unmap_ctx->start = start; 842 unmap_ctx->ctx = ctx; !! 826 unmap_ctx->end = end; 843 unmap_ctx->start = start; !! 827 list_add_tail(&unmap_ctx->list, unmaps); 844 unmap_ctx->end = end; !! 828 } 845 list_add_tail(&unmap_ctx->list, unmaps << 846 829 847 return 0; 830 return 0; 848 } 831 } 849 832 850 void userfaultfd_unmap_complete(struct mm_stru 833 void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) 851 { 834 { 852 struct userfaultfd_unmap_ctx *ctx, *n; 835 struct userfaultfd_unmap_ctx *ctx, *n; 853 struct userfaultfd_wait_queue ewq; 836 struct userfaultfd_wait_queue ewq; 854 837 855 list_for_each_entry_safe(ctx, n, uf, l 838 list_for_each_entry_safe(ctx, n, uf, list) { 856 msg_init(&ewq.msg); 839 msg_init(&ewq.msg); 857 840 858 ewq.msg.event = UFFD_EVENT_UNM 841 ewq.msg.event = UFFD_EVENT_UNMAP; 859 ewq.msg.arg.remove.start = ctx 842 ewq.msg.arg.remove.start = ctx->start; 860 ewq.msg.arg.remove.end = ctx-> 843 ewq.msg.arg.remove.end = ctx->end; 861 844 862 userfaultfd_event_wait_complet 845 userfaultfd_event_wait_completion(ctx->ctx, &ewq); 863 846 864 list_del(&ctx->list); 847 list_del(&ctx->list); 865 kfree(ctx); 848 kfree(ctx); 866 } 849 } 867 } 850 } 868 851 869 static int userfaultfd_release(struct inode *i 852 static int userfaultfd_release(struct inode *inode, struct file *file) 870 { 853 { 871 struct userfaultfd_ctx *ctx = file->pr 854 struct userfaultfd_ctx *ctx = file->private_data; 872 struct mm_struct *mm = ctx->mm; 855 struct mm_struct *mm = ctx->mm; 873 struct vm_area_struct *vma, *prev; 856 struct vm_area_struct *vma, *prev; 874 /* len == 0 means wake all */ 857 /* len == 0 means wake all */ 875 struct userfaultfd_wake_range range = 858 struct userfaultfd_wake_range range = { .len = 0, }; 876 unsigned long new_flags; 859 unsigned long new_flags; 877 VMA_ITERATOR(vmi, mm, 0); << 878 860 879 WRITE_ONCE(ctx->released, true); 861 WRITE_ONCE(ctx->released, true); 880 862 881 if (!mmget_not_zero(mm)) 863 if (!mmget_not_zero(mm)) 882 goto wakeup; 864 goto wakeup; 883 865 884 /* 866 /* 885 * Flush page faults out of all CPUs. 867 * Flush page faults out of all CPUs. 
NOTE: all page faults 886 * must be retried without returning V 868 * must be retried without returning VM_FAULT_SIGBUS if 887 * userfaultfd_ctx_get() succeeds but 869 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx 888 * changes while handle_userfault rele 870 * changes while handle_userfault released the mmap_lock. So 889 * it's critical that released is set 871 * it's critical that released is set to true (above), before 890 * taking the mmap_lock for writing. 872 * taking the mmap_lock for writing. 891 */ 873 */ 892 mmap_write_lock(mm); 874 mmap_write_lock(mm); 893 prev = NULL; 875 prev = NULL; 894 for_each_vma(vmi, vma) { !! 876 for (vma = mm->mmap; vma; vma = vma->vm_next) { 895 cond_resched(); 877 cond_resched(); 896 BUG_ON(!!vma->vm_userfaultfd_c 878 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ 897 !!(vma->vm_flags & __VM 879 !!(vma->vm_flags & __VM_UFFD_FLAGS)); 898 if (vma->vm_userfaultfd_ctx.ct 880 if (vma->vm_userfaultfd_ctx.ctx != ctx) { 899 prev = vma; 881 prev = vma; 900 continue; 882 continue; 901 } 883 } 902 /* Reset ptes for the whole vm << 903 if (userfaultfd_wp(vma)) << 904 uffd_wp_range(vma, vma << 905 vma->vm_ << 906 new_flags = vma->vm_flags & ~_ 884 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; 907 vma = vma_modify_flags_uffd(&v !! 885 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, 908 vm !! 886 new_flags, vma->anon_vma, 909 NU !! 887 vma->vm_file, vma->vm_pgoff, 910 !! 888 vma_policy(vma), 911 vma_start_write(vma); !! 889 NULL_VM_UFFD_CTX, anon_vma_name(vma)); 912 userfaultfd_set_vm_flags(vma, !! 890 if (prev) >> 891 vma = prev; >> 892 else >> 893 prev = vma; >> 894 vma->vm_flags = new_flags; 913 vma->vm_userfaultfd_ctx = NULL 895 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 914 << 915 prev = vma; << 916 } 896 } 917 mmap_write_unlock(mm); 897 mmap_write_unlock(mm); 918 mmput(mm); 898 mmput(mm); 919 wakeup: 899 wakeup: 920 /* 900 /* 921 * After no new page faults can wait o 901 * After no new page faults can wait on this fault_*wqh, flush 922 * the last page faults that may have 902 * the last page faults that may have been already waiting on 923 * the fault_*wqh. 903 * the fault_*wqh. 924 */ 904 */ 925 spin_lock_irq(&ctx->fault_pending_wqh. 
905 spin_lock_irq(&ctx->fault_pending_wqh.lock); 926 __wake_up_locked_key(&ctx->fault_pendi 906 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); 927 __wake_up(&ctx->fault_wqh, TASK_NORMAL 907 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range); 928 spin_unlock_irq(&ctx->fault_pending_wq 908 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 929 909 930 /* Flush pending events that may still 910 /* Flush pending events that may still wait on event_wqh */ 931 wake_up_all(&ctx->event_wqh); 911 wake_up_all(&ctx->event_wqh); 932 912 933 wake_up_poll(&ctx->fd_wqh, EPOLLHUP); 913 wake_up_poll(&ctx->fd_wqh, EPOLLHUP); 934 userfaultfd_ctx_put(ctx); 914 userfaultfd_ctx_put(ctx); 935 return 0; 915 return 0; 936 } 916 } 937 917 938 /* fault_pending_wqh.lock must be hold by the 918 /* fault_pending_wqh.lock must be hold by the caller */ 939 static inline struct userfaultfd_wait_queue *f 919 static inline struct userfaultfd_wait_queue *find_userfault_in( 940 wait_queue_head_t *wqh) 920 wait_queue_head_t *wqh) 941 { 921 { 942 wait_queue_entry_t *wq; 922 wait_queue_entry_t *wq; 943 struct userfaultfd_wait_queue *uwq; 923 struct userfaultfd_wait_queue *uwq; 944 924 945 lockdep_assert_held(&wqh->lock); 925 lockdep_assert_held(&wqh->lock); 946 926 947 uwq = NULL; 927 uwq = NULL; 948 if (!waitqueue_active(wqh)) 928 if (!waitqueue_active(wqh)) 949 goto out; 929 goto out; 950 /* walk in reverse to provide FIFO beh 930 /* walk in reverse to provide FIFO behavior to read userfaults */ 951 wq = list_last_entry(&wqh->head, typeo 931 wq = list_last_entry(&wqh->head, typeof(*wq), entry); 952 uwq = container_of(wq, struct userfaul 932 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); 953 out: 933 out: 954 return uwq; 934 return uwq; 955 } 935 } 956 936 957 static inline struct userfaultfd_wait_queue *f 937 static inline struct userfaultfd_wait_queue *find_userfault( 958 struct userfaultfd_ctx *ctx) 938 struct userfaultfd_ctx *ctx) 959 { 939 { 960 return find_userfault_in(&ctx->fault_p 940 return find_userfault_in(&ctx->fault_pending_wqh); 961 } 941 } 962 942 963 static inline struct userfaultfd_wait_queue *f 943 static inline struct userfaultfd_wait_queue *find_userfault_evt( 964 struct userfaultfd_ctx *ctx) 944 struct userfaultfd_ctx *ctx) 965 { 945 { 966 return find_userfault_in(&ctx->event_w 946 return find_userfault_in(&ctx->event_wqh); 967 } 947 } 968 948 969 static __poll_t userfaultfd_poll(struct file * 949 static __poll_t userfaultfd_poll(struct file *file, poll_table *wait) 970 { 950 { 971 struct userfaultfd_ctx *ctx = file->pr 951 struct userfaultfd_ctx *ctx = file->private_data; 972 __poll_t ret; 952 __poll_t ret; 973 953 974 poll_wait(file, &ctx->fd_wqh, wait); 954 poll_wait(file, &ctx->fd_wqh, wait); 975 955 976 if (!userfaultfd_is_initialized(ctx)) 956 if (!userfaultfd_is_initialized(ctx)) 977 return EPOLLERR; 957 return EPOLLERR; 978 958 979 /* 959 /* 980 * poll() never guarantees that read w 960 * poll() never guarantees that read won't block. 981 * userfaults can be waken before they 961 * userfaults can be waken before they're read(). 
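	 * A monitor pairing poll() with read() therefore has to open
	 * the uffd with O_NONBLOCK, along these lines (userspace
	 * sketch only, not code from this file):
	 *
	 *	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	 *	poll(&(struct pollfd){ .fd = uffd, .events = POLLIN }, 1, -1);
	 *	n = read(uffd, &msg, sizeof(msg));  (may still fail with EAGAIN)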
982 */ 962 */ 983 if (unlikely(!(file->f_flags & O_NONBL 963 if (unlikely(!(file->f_flags & O_NONBLOCK))) 984 return EPOLLERR; 964 return EPOLLERR; 985 /* 965 /* 986 * lockless access to see if there are 966 * lockless access to see if there are pending faults 987 * __pollwait last action is the add_w 967 * __pollwait last action is the add_wait_queue but 988 * the spin_unlock would allow the wai 968 * the spin_unlock would allow the waitqueue_active to 989 * pass above the actual list_add insi 969 * pass above the actual list_add inside 990 * add_wait_queue critical section. So 970 * add_wait_queue critical section. So use a full 991 * memory barrier to serialize the lis 971 * memory barrier to serialize the list_add write of 992 * add_wait_queue() with the waitqueue 972 * add_wait_queue() with the waitqueue_active read 993 * below. 973 * below. 994 */ 974 */ 995 ret = 0; 975 ret = 0; 996 smp_mb(); 976 smp_mb(); 997 if (waitqueue_active(&ctx->fault_pendi 977 if (waitqueue_active(&ctx->fault_pending_wqh)) 998 ret = EPOLLIN; 978 ret = EPOLLIN; 999 else if (waitqueue_active(&ctx->event_ 979 else if (waitqueue_active(&ctx->event_wqh)) 1000 ret = EPOLLIN; 980 ret = EPOLLIN; 1001 981 1002 return ret; 982 return ret; 1003 } 983 } 1004 984 1005 static const struct file_operations userfault 985 static const struct file_operations userfaultfd_fops; 1006 986 1007 static int resolve_userfault_fork(struct user 987 static int resolve_userfault_fork(struct userfaultfd_ctx *new, 1008 struct inod 988 struct inode *inode, 1009 struct uffd 989 struct uffd_msg *msg) 1010 { 990 { 1011 int fd; 991 int fd; 1012 992 1013 fd = anon_inode_create_getfd("[userfa !! 993 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new, 1014 O_RDONLY | (new->flag 994 O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); 1015 if (fd < 0) 995 if (fd < 0) 1016 return fd; 996 return fd; 1017 997 1018 msg->arg.reserved.reserved1 = 0; 998 msg->arg.reserved.reserved1 = 0; 1019 msg->arg.fork.ufd = fd; 999 msg->arg.fork.ufd = fd; 1020 return 0; 1000 return 0; 1021 } 1001 } 1022 1002 1023 static ssize_t userfaultfd_ctx_read(struct us 1003 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, 1024 struct uf 1004 struct uffd_msg *msg, struct inode *inode) 1025 { 1005 { 1026 ssize_t ret; 1006 ssize_t ret; 1027 DECLARE_WAITQUEUE(wait, current); 1007 DECLARE_WAITQUEUE(wait, current); 1028 struct userfaultfd_wait_queue *uwq; 1008 struct userfaultfd_wait_queue *uwq; 1029 /* 1009 /* 1030 * Handling fork event requires sleep 1010 * Handling fork event requires sleeping operations, so 1031 * we drop the event_wqh lock, then d 1011 * we drop the event_wqh lock, then do these ops, then 1032 * lock it back and wake up the waite 1012 * lock it back and wake up the waiter. While the lock is 1033 * dropped the ewq may go away so we 1013 * dropped the ewq may go away so we keep track of it 1034 * carefully. 1014 * carefully. 
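	 * (The sleeping operation is resolve_userfault_fork() below:
	 * it creates the new uffd's anon inode and installs an fd, a
	 * GFP_KERNEL allocation that cannot run under the event_wqh
	 * spinlock.)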
1035 */ 1015 */ 1036 LIST_HEAD(fork_event); 1016 LIST_HEAD(fork_event); 1037 struct userfaultfd_ctx *fork_nctx = N 1017 struct userfaultfd_ctx *fork_nctx = NULL; 1038 1018 1039 /* always take the fd_wqh lock before 1019 /* always take the fd_wqh lock before the fault_pending_wqh lock */ 1040 spin_lock_irq(&ctx->fd_wqh.lock); 1020 spin_lock_irq(&ctx->fd_wqh.lock); 1041 __add_wait_queue(&ctx->fd_wqh, &wait) 1021 __add_wait_queue(&ctx->fd_wqh, &wait); 1042 for (;;) { 1022 for (;;) { 1043 set_current_state(TASK_INTERR 1023 set_current_state(TASK_INTERRUPTIBLE); 1044 spin_lock(&ctx->fault_pending 1024 spin_lock(&ctx->fault_pending_wqh.lock); 1045 uwq = find_userfault(ctx); 1025 uwq = find_userfault(ctx); 1046 if (uwq) { 1026 if (uwq) { 1047 /* 1027 /* 1048 * Use a seqcount to 1028 * Use a seqcount to repeat the lockless check 1049 * in wake_userfault( 1029 * in wake_userfault() to avoid missing 1050 * wakeups because du 1030 * wakeups because during the refile both 1051 * waitqueue could be 1031 * waitqueue could become empty if this is the 1052 * only userfault. 1032 * only userfault. 1053 */ 1033 */ 1054 write_seqcount_begin( 1034 write_seqcount_begin(&ctx->refile_seq); 1055 1035 1056 /* 1036 /* 1057 * The fault_pending_ 1037 * The fault_pending_wqh.lock prevents the uwq 1058 * to disappear from 1038 * to disappear from under us. 1059 * 1039 * 1060 * Refile this userfa 1040 * Refile this userfault from 1061 * fault_pending_wqh 1041 * fault_pending_wqh to fault_wqh, it's not 1062 * pending anymore af 1042 * pending anymore after we read it. 1063 * 1043 * 1064 * Use list_del() by 1044 * Use list_del() by hand (as 1065 * userfaultfd_wake_f 1045 * userfaultfd_wake_function also uses 1066 * list_del_init() by 1046 * list_del_init() by hand) to be sure nobody 1067 * changes __remove_w 1047 * changes __remove_wait_queue() to use 1068 * list_del_init() in 1048 * list_del_init() in turn breaking the 1069 * !list_empty_carefu 1049 * !list_empty_careful() check in 1070 * handle_userfault() 1050 * handle_userfault(). The uwq->wq.head list 1071 * must never be empt 1051 * must never be empty at any time during the 1072 * refile, or the wai 1052 * refile, or the waitqueue could disappear 1073 * from under us. The 1053 * from under us. The "wait_queue_head_t" 1074 * parameter of __rem 1054 * parameter of __remove_wait_queue() is unused 1075 * anyway. 1055 * anyway. 
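	 * (Equivalently: plain list_del() never leaves the entry
	 * pointing at itself, so the list_empty_careful() in
	 * handle_userfault() can only succeed once the wake function
	 * has done its final list_del_init().)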
1076 */ 1056 */ 1077 list_del(&uwq->wq.ent 1057 list_del(&uwq->wq.entry); 1078 add_wait_queue(&ctx-> 1058 add_wait_queue(&ctx->fault_wqh, &uwq->wq); 1079 1059 1080 write_seqcount_end(&c 1060 write_seqcount_end(&ctx->refile_seq); 1081 1061 1082 /* careful to always 1062 /* careful to always initialize msg if ret == 0 */ 1083 *msg = uwq->msg; 1063 *msg = uwq->msg; 1084 spin_unlock(&ctx->fau 1064 spin_unlock(&ctx->fault_pending_wqh.lock); 1085 ret = 0; 1065 ret = 0; 1086 break; 1066 break; 1087 } 1067 } 1088 spin_unlock(&ctx->fault_pendi 1068 spin_unlock(&ctx->fault_pending_wqh.lock); 1089 1069 1090 spin_lock(&ctx->event_wqh.loc 1070 spin_lock(&ctx->event_wqh.lock); 1091 uwq = find_userfault_evt(ctx) 1071 uwq = find_userfault_evt(ctx); 1092 if (uwq) { 1072 if (uwq) { 1093 *msg = uwq->msg; 1073 *msg = uwq->msg; 1094 1074 1095 if (uwq->msg.event == 1075 if (uwq->msg.event == UFFD_EVENT_FORK) { 1096 fork_nctx = ( 1076 fork_nctx = (struct userfaultfd_ctx *) 1097 (unsi 1077 (unsigned long) 1098 uwq-> 1078 uwq->msg.arg.reserved.reserved1; 1099 list_move(&uw 1079 list_move(&uwq->wq.entry, &fork_event); 1100 /* 1080 /* 1101 * fork_nctx 1081 * fork_nctx can be freed as soon as 1102 * we drop th 1082 * we drop the lock, unless we take a 1103 * reference 1083 * reference on it. 1104 */ 1084 */ 1105 userfaultfd_c 1085 userfaultfd_ctx_get(fork_nctx); 1106 spin_unlock(& 1086 spin_unlock(&ctx->event_wqh.lock); 1107 ret = 0; 1087 ret = 0; 1108 break; 1088 break; 1109 } 1089 } 1110 1090 1111 userfaultfd_event_com 1091 userfaultfd_event_complete(ctx, uwq); 1112 spin_unlock(&ctx->eve 1092 spin_unlock(&ctx->event_wqh.lock); 1113 ret = 0; 1093 ret = 0; 1114 break; 1094 break; 1115 } 1095 } 1116 spin_unlock(&ctx->event_wqh.l 1096 spin_unlock(&ctx->event_wqh.lock); 1117 1097 1118 if (signal_pending(current)) 1098 if (signal_pending(current)) { 1119 ret = -ERESTARTSYS; 1099 ret = -ERESTARTSYS; 1120 break; 1100 break; 1121 } 1101 } 1122 if (no_wait) { 1102 if (no_wait) { 1123 ret = -EAGAIN; 1103 ret = -EAGAIN; 1124 break; 1104 break; 1125 } 1105 } 1126 spin_unlock_irq(&ctx->fd_wqh. 1106 spin_unlock_irq(&ctx->fd_wqh.lock); 1127 schedule(); 1107 schedule(); 1128 spin_lock_irq(&ctx->fd_wqh.lo 1108 spin_lock_irq(&ctx->fd_wqh.lock); 1129 } 1109 } 1130 __remove_wait_queue(&ctx->fd_wqh, &wa 1110 __remove_wait_queue(&ctx->fd_wqh, &wait); 1131 __set_current_state(TASK_RUNNING); 1111 __set_current_state(TASK_RUNNING); 1132 spin_unlock_irq(&ctx->fd_wqh.lock); 1112 spin_unlock_irq(&ctx->fd_wqh.lock); 1133 1113 1134 if (!ret && msg->event == UFFD_EVENT_ 1114 if (!ret && msg->event == UFFD_EVENT_FORK) { 1135 ret = resolve_userfault_fork( 1115 ret = resolve_userfault_fork(fork_nctx, inode, msg); 1136 spin_lock_irq(&ctx->event_wqh 1116 spin_lock_irq(&ctx->event_wqh.lock); 1137 if (!list_empty(&fork_event)) 1117 if (!list_empty(&fork_event)) { 1138 /* 1118 /* 1139 * The fork thread di 1119 * The fork thread didn't abort, so we can 1140 * drop the temporary 1120 * drop the temporary refcount. 1141 */ 1121 */ 1142 userfaultfd_ctx_put(f 1122 userfaultfd_ctx_put(fork_nctx); 1143 1123 1144 uwq = list_first_entr 1124 uwq = list_first_entry(&fork_event, 1145 1125 typeof(*uwq), 1146 1126 wq.entry); 1147 /* 1127 /* 1148 * If fork_event list 1128 * If fork_event list wasn't empty and in turn 1149 * the event wasn't a 1129 * the event wasn't already released by fork 1150 * (the event is allo 1130 * (the event is allocated on fork kernel 1151 * stack), put the ev 1131 * stack), put the event back to its place in 1152 * the event_wq. 
fork 1132 * the event_wq. fork_event head will be freed 1153 * as soon as we retu 1133 * as soon as we return so the event cannot 1154 * stay queued there 1134 * stay queued there no matter the current 1155 * "ret" value. 1135 * "ret" value. 1156 */ 1136 */ 1157 list_del(&uwq->wq.ent 1137 list_del(&uwq->wq.entry); 1158 __add_wait_queue(&ctx 1138 __add_wait_queue(&ctx->event_wqh, &uwq->wq); 1159 1139 1160 /* 1140 /* 1161 * Leave the event in 1141 * Leave the event in the waitqueue and report 1162 * error to userland 1142 * error to userland if we failed to resolve 1163 * the userfault fork 1143 * the userfault fork. 1164 */ 1144 */ 1165 if (likely(!ret)) 1145 if (likely(!ret)) 1166 userfaultfd_e 1146 userfaultfd_event_complete(ctx, uwq); 1167 } else { 1147 } else { 1168 /* 1148 /* 1169 * Here the fork thre 1149 * Here the fork thread aborted and the 1170 * refcount from the 1150 * refcount from the fork thread on fork_nctx 1171 * has already been r 1151 * has already been released. We still hold 1172 * the reference we t 1152 * the reference we took before releasing the 1173 * lock above. If res 1153 * lock above. If resolve_userfault_fork 1174 * failed we've to dr 1154 * failed we've to drop it because the 1175 * fork_nctx has to b 1155 * fork_nctx has to be freed in such case. If 1176 * it succeeded we'll 1156 * it succeeded we'll hold it because the new 1177 * uffd references it 1157 * uffd references it. 1178 */ 1158 */ 1179 if (ret) 1159 if (ret) 1180 userfaultfd_c 1160 userfaultfd_ctx_put(fork_nctx); 1181 } 1161 } 1182 spin_unlock_irq(&ctx->event_w 1162 spin_unlock_irq(&ctx->event_wqh.lock); 1183 } 1163 } 1184 1164 1185 return ret; 1165 return ret; 1186 } 1166 } 1187 1167 1188 static ssize_t userfaultfd_read_iter(struct k !! 1168 static ssize_t userfaultfd_read(struct file *file, char __user *buf, >> 1169 size_t count, loff_t *ppos) 1189 { 1170 { 1190 struct file *file = iocb->ki_filp; << 1191 struct userfaultfd_ctx *ctx = file->p 1171 struct userfaultfd_ctx *ctx = file->private_data; 1192 ssize_t _ret, ret = 0; 1172 ssize_t _ret, ret = 0; 1193 struct uffd_msg msg; 1173 struct uffd_msg msg; >> 1174 int no_wait = file->f_flags & O_NONBLOCK; 1194 struct inode *inode = file_inode(file 1175 struct inode *inode = file_inode(file); 1195 bool no_wait; << 1196 1176 1197 if (!userfaultfd_is_initialized(ctx)) 1177 if (!userfaultfd_is_initialized(ctx)) 1198 return -EINVAL; 1178 return -EINVAL; 1199 1179 1200 no_wait = file->f_flags & O_NONBLOCK << 1201 for (;;) { 1180 for (;;) { 1202 if (iov_iter_count(to) < size !! 1181 if (count < sizeof(msg)) 1203 return ret ? ret : -E 1182 return ret ? ret : -EINVAL; 1204 _ret = userfaultfd_ctx_read(c 1183 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode); 1205 if (_ret < 0) 1184 if (_ret < 0) 1206 return ret ? ret : _r 1185 return ret ? ret : _ret; 1207 _ret = !copy_to_iter_full(&ms !! 1186 if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg))) 1208 if (_ret) << 1209 return ret ? ret : -E 1187 return ret ? ret : -EFAULT; 1210 ret += sizeof(msg); 1188 ret += sizeof(msg); >> 1189 buf += sizeof(msg); >> 1190 count -= sizeof(msg); 1211 /* 1191 /* 1212 * Allow to read more than on 1192 * Allow to read more than one fault at time but only 1213 * block if waiting for the v 1193 * block if waiting for the very first one. 1214 */ 1194 */ 1215 no_wait = true; !! 
1195 no_wait = O_NONBLOCK; 1216 } 1196 } 1217 } 1197 } 1218 1198 1219 static void __wake_userfault(struct userfault 1199 static void __wake_userfault(struct userfaultfd_ctx *ctx, 1220 struct userfault 1200 struct userfaultfd_wake_range *range) 1221 { 1201 { 1222 spin_lock_irq(&ctx->fault_pending_wqh 1202 spin_lock_irq(&ctx->fault_pending_wqh.lock); 1223 /* wake all in the range and autoremo 1203 /* wake all in the range and autoremove */ 1224 if (waitqueue_active(&ctx->fault_pend 1204 if (waitqueue_active(&ctx->fault_pending_wqh)) 1225 __wake_up_locked_key(&ctx->fa 1205 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 1226 range); 1206 range); 1227 if (waitqueue_active(&ctx->fault_wqh) 1207 if (waitqueue_active(&ctx->fault_wqh)) 1228 __wake_up(&ctx->fault_wqh, TA 1208 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); 1229 spin_unlock_irq(&ctx->fault_pending_w 1209 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 1230 } 1210 } 1231 1211 1232 static __always_inline void wake_userfault(st 1212 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, 1233 st 1213 struct userfaultfd_wake_range *range) 1234 { 1214 { 1235 unsigned seq; 1215 unsigned seq; 1236 bool need_wakeup; 1216 bool need_wakeup; 1237 1217 1238 /* 1218 /* 1239 * To be sure waitqueue_active() is n 1219 * To be sure waitqueue_active() is not reordered by the CPU 1240 * before the pagetable update, use a 1220 * before the pagetable update, use an explicit SMP memory 1241 * barrier here. PT lock release or m 1221 * barrier here. PT lock release or mmap_read_unlock(mm) still 1242 * have release semantics that can al 1222 * have release semantics that can allow the 1243 * waitqueue_active() to be reordered 1223 * waitqueue_active() to be reordered before the pte update. 1244 */ 1224 */ 1245 smp_mb(); 1225 smp_mb(); 1246 1226 1247 /* 1227 /* 1248 * Use waitqueue_active because it's 1228 * Use waitqueue_active because it's very frequent to 1249 * change the address space atomicall 1229 * change the address space atomically even if there are no 1250 * userfaults yet. So we take the spi 1230 * userfaults yet. So we take the spinlock only when we're 1251 * sure we've userfaults to wake. 1231 * sure we've userfaults to wake. 1252 */ 1232 */ 1253 do { 1233 do { 1254 seq = read_seqcount_begin(&ct 1234 seq = read_seqcount_begin(&ctx->refile_seq); 1255 need_wakeup = waitqueue_activ 1235 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || 1256 waitqueue_active(&ctx 1236 waitqueue_active(&ctx->fault_wqh); 1257 cond_resched(); 1237 cond_resched(); 1258 } while (read_seqcount_retry(&ctx->re 1238 } while (read_seqcount_retry(&ctx->refile_seq, seq)); 1259 if (need_wakeup) 1239 if (need_wakeup) 1260 __wake_userfault(ctx, range); 1240 __wake_userfault(ctx, range); 1261 } 1241 } 1262 1242 1263 static __always_inline int validate_unaligned !! 1243 static __always_inline int validate_range(struct mm_struct *mm, 1264 struct mm_struct *mm, __u64 start, __ !! 
1244 __u64 start, __u64 len) 1265 { 1245 { 1266 __u64 task_size = mm->task_size; 1246 __u64 task_size = mm->task_size; 1267 1247 >> 1248 if (start & ~PAGE_MASK) >> 1249 return -EINVAL; 1268 if (len & ~PAGE_MASK) 1250 if (len & ~PAGE_MASK) 1269 return -EINVAL; 1251 return -EINVAL; 1270 if (!len) 1252 if (!len) 1271 return -EINVAL; 1253 return -EINVAL; 1272 if (start < mmap_min_addr) 1254 if (start < mmap_min_addr) 1273 return -EINVAL; 1255 return -EINVAL; 1274 if (start >= task_size) 1256 if (start >= task_size) 1275 return -EINVAL; 1257 return -EINVAL; 1276 if (len > task_size - start) 1258 if (len > task_size - start) 1277 return -EINVAL; 1259 return -EINVAL; 1278 if (start + len <= start) << 1279 return -EINVAL; << 1280 return 0; 1260 return 0; 1281 } 1261 } 1282 1262 1283 static __always_inline int validate_range(str << 1284 __u << 1285 { << 1286 if (start & ~PAGE_MASK) << 1287 return -EINVAL; << 1288 << 1289 return validate_unaligned_range(mm, s << 1290 } << 1291 << 1292 static int userfaultfd_register(struct userfa 1263 static int userfaultfd_register(struct userfaultfd_ctx *ctx, 1293 unsigned long 1264 unsigned long arg) 1294 { 1265 { 1295 struct mm_struct *mm = ctx->mm; 1266 struct mm_struct *mm = ctx->mm; 1296 struct vm_area_struct *vma, *prev, *c 1267 struct vm_area_struct *vma, *prev, *cur; 1297 int ret; 1268 int ret; 1298 struct uffdio_register uffdio_registe 1269 struct uffdio_register uffdio_register; 1299 struct uffdio_register __user *user_u 1270 struct uffdio_register __user *user_uffdio_register; 1300 unsigned long vm_flags, new_flags; 1271 unsigned long vm_flags, new_flags; 1301 bool found; 1272 bool found; 1302 bool basic_ioctls; 1273 bool basic_ioctls; 1303 unsigned long start, end, vma_end; 1274 unsigned long start, end, vma_end; 1304 struct vma_iterator vmi; << 1305 bool wp_async = userfaultfd_wp_async_ << 1306 1275 1307 user_uffdio_register = (struct uffdio 1276 user_uffdio_register = (struct uffdio_register __user *) arg; 1308 1277 1309 ret = -EFAULT; 1278 ret = -EFAULT; 1310 if (copy_from_user(&uffdio_register, 1279 if (copy_from_user(&uffdio_register, user_uffdio_register, 1311 sizeof(uffdio_regi 1280 sizeof(uffdio_register)-sizeof(__u64))) 1312 goto out; 1281 goto out; 1313 1282 1314 ret = -EINVAL; 1283 ret = -EINVAL; 1315 if (!uffdio_register.mode) 1284 if (!uffdio_register.mode) 1316 goto out; 1285 goto out; 1317 if (uffdio_register.mode & ~UFFD_API_ 1286 if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) 1318 goto out; 1287 goto out; 1319 vm_flags = 0; 1288 vm_flags = 0; 1320 if (uffdio_register.mode & UFFDIO_REG 1289 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) 1321 vm_flags |= VM_UFFD_MISSING; 1290 vm_flags |= VM_UFFD_MISSING; 1322 if (uffdio_register.mode & UFFDIO_REG 1291 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { 1323 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 1292 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 1324 goto out; 1293 goto out; 1325 #endif 1294 #endif 1326 vm_flags |= VM_UFFD_WP; 1295 vm_flags |= VM_UFFD_WP; 1327 } 1296 } 1328 if (uffdio_register.mode & UFFDIO_REG 1297 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { 1329 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 1298 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 1330 goto out; 1299 goto out; 1331 #endif 1300 #endif 1332 vm_flags |= VM_UFFD_MINOR; 1301 vm_flags |= VM_UFFD_MINOR; 1333 } 1302 } 1334 1303 1335 ret = validate_range(mm, uffdio_regis 1304 ret = validate_range(mm, uffdio_register.range.start, 1336 uffdio_register. 
1305 uffdio_register.range.len); 1337 if (ret) 1306 if (ret) 1338 goto out; 1307 goto out; 1339 1308 1340 start = uffdio_register.range.start; 1309 start = uffdio_register.range.start; 1341 end = start + uffdio_register.range.l 1310 end = start + uffdio_register.range.len; 1342 1311 1343 ret = -ENOMEM; 1312 ret = -ENOMEM; 1344 if (!mmget_not_zero(mm)) 1313 if (!mmget_not_zero(mm)) 1345 goto out; 1314 goto out; 1346 1315 1347 ret = -EINVAL; << 1348 mmap_write_lock(mm); 1316 mmap_write_lock(mm); 1349 vma_iter_init(&vmi, mm, start); !! 1317 vma = find_vma_prev(mm, start, &prev); 1350 vma = vma_find(&vmi, end); << 1351 if (!vma) 1318 if (!vma) 1352 goto out_unlock; 1319 goto out_unlock; 1353 1320 >> 1321 /* check that there's at least one vma in the range */ >> 1322 ret = -EINVAL; >> 1323 if (vma->vm_start >= end) >> 1324 goto out_unlock; >> 1325 1354 /* 1326 /* 1355 * If the first vma contains huge pag 1327 * If the first vma contains huge pages, make sure start address 1356 * is aligned to huge page size. 1328 * is aligned to huge page size. 1357 */ 1329 */ 1358 if (is_vm_hugetlb_page(vma)) { 1330 if (is_vm_hugetlb_page(vma)) { 1359 unsigned long vma_hpagesize = 1331 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); 1360 1332 1361 if (start & (vma_hpagesize - 1333 if (start & (vma_hpagesize - 1)) 1362 goto out_unlock; 1334 goto out_unlock; 1363 } 1335 } 1364 1336 1365 /* 1337 /* 1366 * Search for not compatible vmas. 1338 * Search for not compatible vmas. 1367 */ 1339 */ 1368 found = false; 1340 found = false; 1369 basic_ioctls = false; 1341 basic_ioctls = false; 1370 cur = vma; !! 1342 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { 1371 do { << 1372 cond_resched(); 1343 cond_resched(); 1373 1344 1374 BUG_ON(!!cur->vm_userfaultfd_ 1345 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ 1375 !!(cur->vm_flags & __V 1346 !!(cur->vm_flags & __VM_UFFD_FLAGS)); 1376 1347 1377 /* check not compatible vmas 1348 /* check not compatible vmas */ 1378 ret = -EINVAL; 1349 ret = -EINVAL; 1379 if (!vma_can_userfault(cur, v !! 1350 if (!vma_can_userfault(cur, vm_flags)) 1380 goto out_unlock; 1351 goto out_unlock; 1381 1352 1382 /* 1353 /* 1383 * UFFDIO_COPY will fill file 1354 * UFFDIO_COPY will fill file holes even without 1384 * PROT_WRITE. This check enf 1355 * PROT_WRITE. This check enforces that if this is a 1385 * MAP_SHARED, the process ha 1356 * MAP_SHARED, the process has write permission to the backing 1386 * file. If VM_MAYWRITE is se 1357 * file. If VM_MAYWRITE is set it also enforces that on a 1387 * MAP_SHARED vma: there is n 1358 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further 1388 * F_WRITE_SEAL can be taken 1359 * F_WRITE_SEAL can be taken until the vma is destroyed. 1389 */ 1360 */ 1390 ret = -EPERM; 1361 ret = -EPERM; 1391 if (unlikely(!(cur->vm_flags 1362 if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) 1392 goto out_unlock; 1363 goto out_unlock; 1393 1364 1394 /* 1365 /* 1395 * If this vma contains endin 1366 * If this vma contains ending address, and huge pages 1396 * check alignment. 1367 * check alignment. 
1397 */ 1368 */ 1398 if (is_vm_hugetlb_page(cur) & 1369 if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && 1399 end > cur->vm_start) { 1370 end > cur->vm_start) { 1400 unsigned long vma_hpa 1371 unsigned long vma_hpagesize = vma_kernel_pagesize(cur); 1401 1372 1402 ret = -EINVAL; 1373 ret = -EINVAL; 1403 1374 1404 if (end & (vma_hpages 1375 if (end & (vma_hpagesize - 1)) 1405 goto out_unlo 1376 goto out_unlock; 1406 } 1377 } 1407 if ((vm_flags & VM_UFFD_WP) & 1378 if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE)) 1408 goto out_unlock; 1379 goto out_unlock; 1409 1380 1410 /* 1381 /* 1411 * Check that this vma isn't 1382 * Check that this vma isn't already owned by a 1412 * different userfaultfd. We 1383 * different userfaultfd. We can't allow more than one 1413 * userfaultfd to own a singl 1384 * userfaultfd to own a single vma simultaneously or we 1414 * wouldn't know which one to 1385 * wouldn't know which one to deliver the userfaults to. 1415 */ 1386 */ 1416 ret = -EBUSY; 1387 ret = -EBUSY; 1417 if (cur->vm_userfaultfd_ctx.c 1388 if (cur->vm_userfaultfd_ctx.ctx && 1418 cur->vm_userfaultfd_ctx.c 1389 cur->vm_userfaultfd_ctx.ctx != ctx) 1419 goto out_unlock; 1390 goto out_unlock; 1420 1391 1421 /* 1392 /* 1422 * Note vmas containing huge 1393 * Note vmas containing huge pages 1423 */ 1394 */ 1424 if (is_vm_hugetlb_page(cur)) 1395 if (is_vm_hugetlb_page(cur)) 1425 basic_ioctls = true; 1396 basic_ioctls = true; 1426 1397 1427 found = true; 1398 found = true; 1428 } for_each_vma_range(vmi, cur, end); !! 1399 } 1429 BUG_ON(!found); 1400 BUG_ON(!found); 1430 1401 1431 vma_iter_set(&vmi, start); << 1432 prev = vma_prev(&vmi); << 1433 if (vma->vm_start < start) 1402 if (vma->vm_start < start) 1434 prev = vma; 1403 prev = vma; 1435 1404 1436 ret = 0; 1405 ret = 0; 1437 for_each_vma_range(vmi, vma, end) { !! 1406 do { 1438 cond_resched(); 1407 cond_resched(); 1439 1408 1440 BUG_ON(!vma_can_userfault(vma !! 1409 BUG_ON(!vma_can_userfault(vma, vm_flags)); 1441 BUG_ON(vma->vm_userfaultfd_ct 1410 BUG_ON(vma->vm_userfaultfd_ctx.ctx && 1442 vma->vm_userfaultfd_ct 1411 vma->vm_userfaultfd_ctx.ctx != ctx); 1443 WARN_ON(!(vma->vm_flags & VM_ 1412 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1444 1413 1445 /* 1414 /* 1446 * Nothing to do: this vma is 1415 * Nothing to do: this vma is already registered into this 1447 * userfaultfd and with the r 1416 * userfaultfd and with the right tracking mode too. 1448 */ 1417 */ 1449 if (vma->vm_userfaultfd_ctx.c 1418 if (vma->vm_userfaultfd_ctx.ctx == ctx && 1450 (vma->vm_flags & vm_flags 1419 (vma->vm_flags & vm_flags) == vm_flags) 1451 goto skip; 1420 goto skip; 1452 1421 1453 if (vma->vm_start > start) 1422 if (vma->vm_start > start) 1454 start = vma->vm_start 1423 start = vma->vm_start; 1455 vma_end = min(end, vma->vm_en 1424 vma_end = min(end, vma->vm_end); 1456 1425 1457 new_flags = (vma->vm_flags & 1426 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; 1458 vma = vma_modify_flags_uffd(& !! 1427 prev = vma_merge(mm, prev, start, vma_end, new_flags, 1459 n !! 1428 vma->anon_vma, vma->vm_file, vma->vm_pgoff, 1460 ( !! 1429 vma_policy(vma), 1461 if (IS_ERR(vma)) { !! 1430 ((struct vm_userfaultfd_ctx){ ctx }), 1462 ret = PTR_ERR(vma); !! 1431 anon_vma_name(vma)); 1463 break; !! 1432 if (prev) { >> 1433 vma = prev; >> 1434 goto next; 1464 } 1435 } 1465 !! 
1436 if (vma->vm_start < start) { >> 1437 ret = split_vma(mm, vma, start, 1); >> 1438 if (ret) >> 1439 break; >> 1440 } >> 1441 if (vma->vm_end > end) { >> 1442 ret = split_vma(mm, vma, end, 0); >> 1443 if (ret) >> 1444 break; >> 1445 } >> 1446 next: 1466 /* 1447 /* 1467 * In the vma_merge() success 1448 * In the vma_merge() successful mprotect-like case 8: 1468 * the next vma was merged in 1449 * the next vma was merged into the current one and 1469 * the current one has not be 1450 * the current one has not been updated yet. 1470 */ 1451 */ 1471 vma_start_write(vma); !! 1452 vma->vm_flags = new_flags; 1472 userfaultfd_set_vm_flags(vma, << 1473 vma->vm_userfaultfd_ctx.ctx = 1453 vma->vm_userfaultfd_ctx.ctx = ctx; 1474 1454 1475 if (is_vm_hugetlb_page(vma) & 1455 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) 1476 hugetlb_unshare_all_p 1456 hugetlb_unshare_all_pmds(vma); 1477 1457 1478 skip: 1458 skip: 1479 prev = vma; 1459 prev = vma; 1480 start = vma->vm_end; 1460 start = vma->vm_end; 1481 } !! 1461 vma = vma->vm_next; 1482 !! 1462 } while (vma && vma->vm_start < end); 1483 out_unlock: 1463 out_unlock: 1484 mmap_write_unlock(mm); 1464 mmap_write_unlock(mm); 1485 mmput(mm); 1465 mmput(mm); 1486 if (!ret) { 1466 if (!ret) { 1487 __u64 ioctls_out; 1467 __u64 ioctls_out; 1488 1468 1489 ioctls_out = basic_ioctls ? U 1469 ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : 1490 UFFD_API_RANGE_IOCTLS; 1470 UFFD_API_RANGE_IOCTLS; 1491 1471 1492 /* 1472 /* 1493 * Declare the WP ioctl only 1473 * Declare the WP ioctl only if the WP mode is 1494 * specified and all checks p 1474 * specified and all checks passed with the range 1495 */ 1475 */ 1496 if (!(uffdio_register.mode & 1476 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) 1497 ioctls_out &= ~((__u6 1477 ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); 1498 1478 1499 /* CONTINUE ioctl is only sup 1479 /* CONTINUE ioctl is only supported for MINOR ranges. */ 1500 if (!(uffdio_register.mode & 1480 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) 1501 ioctls_out &= ~((__u6 1481 ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); 1502 1482 1503 /* 1483 /* 1504 * Now that we scanned all vm 1484 * Now that we scanned all vmas we can already tell 1505 * userland which ioctls meth 1485 * userland which ioctls methods are guaranteed to 1506 * succeed on this range. 1486 * succeed on this range. 
1507 */ 1487 */ 1508 if (put_user(ioctls_out, &use 1488 if (put_user(ioctls_out, &user_uffdio_register->ioctls)) 1509 ret = -EFAULT; 1489 ret = -EFAULT; 1510 } 1490 } 1511 out: 1491 out: 1512 return ret; 1492 return ret; 1513 } 1493 } 1514 1494 1515 static int userfaultfd_unregister(struct user 1495 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, 1516 unsigned lo 1496 unsigned long arg) 1517 { 1497 { 1518 struct mm_struct *mm = ctx->mm; 1498 struct mm_struct *mm = ctx->mm; 1519 struct vm_area_struct *vma, *prev, *c 1499 struct vm_area_struct *vma, *prev, *cur; 1520 int ret; 1500 int ret; 1521 struct uffdio_range uffdio_unregister 1501 struct uffdio_range uffdio_unregister; 1522 unsigned long new_flags; 1502 unsigned long new_flags; 1523 bool found; 1503 bool found; 1524 unsigned long start, end, vma_end; 1504 unsigned long start, end, vma_end; 1525 const void __user *buf = (void __user 1505 const void __user *buf = (void __user *)arg; 1526 struct vma_iterator vmi; << 1527 bool wp_async = userfaultfd_wp_async_ << 1528 1506 1529 ret = -EFAULT; 1507 ret = -EFAULT; 1530 if (copy_from_user(&uffdio_unregister 1508 if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) 1531 goto out; 1509 goto out; 1532 1510 1533 ret = validate_range(mm, uffdio_unreg 1511 ret = validate_range(mm, uffdio_unregister.start, 1534 uffdio_unregiste 1512 uffdio_unregister.len); 1535 if (ret) 1513 if (ret) 1536 goto out; 1514 goto out; 1537 1515 1538 start = uffdio_unregister.start; 1516 start = uffdio_unregister.start; 1539 end = start + uffdio_unregister.len; 1517 end = start + uffdio_unregister.len; 1540 1518 1541 ret = -ENOMEM; 1519 ret = -ENOMEM; 1542 if (!mmget_not_zero(mm)) 1520 if (!mmget_not_zero(mm)) 1543 goto out; 1521 goto out; 1544 1522 1545 mmap_write_lock(mm); 1523 mmap_write_lock(mm); 1546 ret = -EINVAL; !! 1524 vma = find_vma_prev(mm, start, &prev); 1547 vma_iter_init(&vmi, mm, start); << 1548 vma = vma_find(&vmi, end); << 1549 if (!vma) 1525 if (!vma) 1550 goto out_unlock; 1526 goto out_unlock; 1551 1527 >> 1528 /* check that there's at least one vma in the range */ >> 1529 ret = -EINVAL; >> 1530 if (vma->vm_start >= end) >> 1531 goto out_unlock; >> 1532 1552 /* 1533 /* 1553 * If the first vma contains huge pag 1534 * If the first vma contains huge pages, make sure start address 1554 * is aligned to huge page size. 1535 * is aligned to huge page size. 1555 */ 1536 */ 1556 if (is_vm_hugetlb_page(vma)) { 1537 if (is_vm_hugetlb_page(vma)) { 1557 unsigned long vma_hpagesize = 1538 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); 1558 1539 1559 if (start & (vma_hpagesize - 1540 if (start & (vma_hpagesize - 1)) 1560 goto out_unlock; 1541 goto out_unlock; 1561 } 1542 } 1562 1543 1563 /* 1544 /* 1564 * Search for not compatible vmas. 1545 * Search for not compatible vmas. 1565 */ 1546 */ 1566 found = false; 1547 found = false; 1567 cur = vma; !! 1548 ret = -EINVAL; 1568 do { !! 
1549 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { 1569 cond_resched(); 1550 cond_resched(); 1570 1551 1571 BUG_ON(!!cur->vm_userfaultfd_ 1552 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ 1572 !!(cur->vm_flags & __V 1553 !!(cur->vm_flags & __VM_UFFD_FLAGS)); 1573 1554 1574 /* 1555 /* 1575 * Check not compatible vmas, 1556 * Check not compatible vmas, not strictly required 1576 * here as not compatible vma 1557 * here as not compatible vmas cannot have an 1577 * userfaultfd_ctx registered 1558 * userfaultfd_ctx registered on them, but this 1578 * provides for more strict b 1559 * provides for more strict behavior to notice 1579 * unregistration errors. 1560 * unregistration errors. 1580 */ 1561 */ 1581 if (!vma_can_userfault(cur, c !! 1562 if (!vma_can_userfault(cur, cur->vm_flags)) 1582 goto out_unlock; 1563 goto out_unlock; 1583 1564 1584 found = true; 1565 found = true; 1585 } for_each_vma_range(vmi, cur, end); !! 1566 } 1586 BUG_ON(!found); 1567 BUG_ON(!found); 1587 1568 1588 vma_iter_set(&vmi, start); << 1589 prev = vma_prev(&vmi); << 1590 if (vma->vm_start < start) 1569 if (vma->vm_start < start) 1591 prev = vma; 1570 prev = vma; 1592 1571 1593 ret = 0; 1572 ret = 0; 1594 for_each_vma_range(vmi, vma, end) { !! 1573 do { 1595 cond_resched(); 1574 cond_resched(); 1596 1575 1597 BUG_ON(!vma_can_userfault(vma !! 1576 BUG_ON(!vma_can_userfault(vma, vma->vm_flags)); 1598 1577 1599 /* 1578 /* 1600 * Nothing to do: this vma is 1579 * Nothing to do: this vma is already registered into this 1601 * userfaultfd and with the r 1580 * userfaultfd and with the right tracking mode too. 1602 */ 1581 */ 1603 if (!vma->vm_userfaultfd_ctx. 1582 if (!vma->vm_userfaultfd_ctx.ctx) 1604 goto skip; 1583 goto skip; 1605 1584 1606 WARN_ON(!(vma->vm_flags & VM_ 1585 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1607 1586 1608 if (vma->vm_start > start) 1587 if (vma->vm_start > start) 1609 start = vma->vm_start 1588 start = vma->vm_start; 1610 vma_end = min(end, vma->vm_en 1589 vma_end = min(end, vma->vm_end); 1611 1590 1612 if (userfaultfd_missing(vma)) 1591 if (userfaultfd_missing(vma)) { 1613 /* 1592 /* 1614 * Wake any concurren 1593 * Wake any concurrent pending userfault while 1615 * we unregister, so 1594 * we unregister, so they will not hang 1616 * permanently and it 1595 * permanently and it avoids userland to call 1617 * UFFDIO_WAKE explic 1596 * UFFDIO_WAKE explicitly. 1618 */ 1597 */ 1619 struct userfaultfd_wa 1598 struct userfaultfd_wake_range range; 1620 range.start = start; 1599 range.start = start; 1621 range.len = vma_end - 1600 range.len = vma_end - start; 1622 wake_userfault(vma->v 1601 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); 1623 } 1602 } 1624 1603 1625 /* Reset ptes for the whole v 1604 /* Reset ptes for the whole vma range if wr-protected */ 1626 if (userfaultfd_wp(vma)) 1605 if (userfaultfd_wp(vma)) 1627 uffd_wp_range(vma, st !! 1606 uffd_wp_range(mm, vma, start, vma_end - start, false); 1628 1607 1629 new_flags = vma->vm_flags & ~ 1608 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; 1630 vma = vma_modify_flags_uffd(& !! 1609 prev = vma_merge(mm, prev, start, vma_end, new_flags, 1631 n !! 1610 vma->anon_vma, vma->vm_file, vma->vm_pgoff, 1632 if (IS_ERR(vma)) { !! 1611 vma_policy(vma), 1633 ret = PTR_ERR(vma); !! 1612 NULL_VM_UFFD_CTX, anon_vma_name(vma)); 1634 break; !! 1613 if (prev) { >> 1614 vma = prev; >> 1615 goto next; 1635 } 1616 } 1636 !! 
1617 if (vma->vm_start < start) { >> 1618 ret = split_vma(mm, vma, start, 1); >> 1619 if (ret) >> 1620 break; >> 1621 } >> 1622 if (vma->vm_end > end) { >> 1623 ret = split_vma(mm, vma, end, 0); >> 1624 if (ret) >> 1625 break; >> 1626 } >> 1627 next: 1637 /* 1628 /* 1638 * In the vma_merge() success 1629 * In the vma_merge() successful mprotect-like case 8: 1639 * the next vma was merged in 1630 * the next vma was merged into the current one and 1640 * the current one has not be 1631 * the current one has not been updated yet. 1641 */ 1632 */ 1642 vma_start_write(vma); !! 1633 vma->vm_flags = new_flags; 1643 userfaultfd_set_vm_flags(vma, << 1644 vma->vm_userfaultfd_ctx = NUL 1634 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 1645 1635 1646 skip: 1636 skip: 1647 prev = vma; 1637 prev = vma; 1648 start = vma->vm_end; 1638 start = vma->vm_end; 1649 } !! 1639 vma = vma->vm_next; 1650 !! 1640 } while (vma && vma->vm_start < end); 1651 out_unlock: 1641 out_unlock: 1652 mmap_write_unlock(mm); 1642 mmap_write_unlock(mm); 1653 mmput(mm); 1643 mmput(mm); 1654 out: 1644 out: 1655 return ret; 1645 return ret; 1656 } 1646 } 1657 1647 1658 /* 1648 /* 1659 * userfaultfd_wake may be used in combinatio 1649 * userfaultfd_wake may be used in combination with the 1660 * UFFDIO_*_MODE_DONTWAKE to wakeup userfault 1650 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches. 1661 */ 1651 */ 1662 static int userfaultfd_wake(struct userfaultf 1652 static int userfaultfd_wake(struct userfaultfd_ctx *ctx, 1663 unsigned long arg 1653 unsigned long arg) 1664 { 1654 { 1665 int ret; 1655 int ret; 1666 struct uffdio_range uffdio_wake; 1656 struct uffdio_range uffdio_wake; 1667 struct userfaultfd_wake_range range; 1657 struct userfaultfd_wake_range range; 1668 const void __user *buf = (void __user 1658 const void __user *buf = (void __user *)arg; 1669 1659 1670 ret = -EFAULT; 1660 ret = -EFAULT; 1671 if (copy_from_user(&uffdio_wake, buf, 1661 if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) 1672 goto out; 1662 goto out; 1673 1663 1674 ret = validate_range(ctx->mm, uffdio_ 1664 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); 1675 if (ret) 1665 if (ret) 1676 goto out; 1666 goto out; 1677 1667 1678 range.start = uffdio_wake.start; 1668 range.start = uffdio_wake.start; 1679 range.len = uffdio_wake.len; 1669 range.len = uffdio_wake.len; 1680 1670 1681 /* 1671 /* 1682 * len == 0 means wake all and we don 1672 * len == 0 means wake all and we don't want to wake all here, 1683 * so check it again to be sure. 1673 * so check it again to be sure. 
1684 */ 1674 */ 1685 VM_BUG_ON(!range.len); 1675 VM_BUG_ON(!range.len); 1686 1676 1687 wake_userfault(ctx, &range); 1677 wake_userfault(ctx, &range); 1688 ret = 0; 1678 ret = 0; 1689 1679 1690 out: 1680 out: 1691 return ret; 1681 return ret; 1692 } 1682 } 1693 1683 1694 static int userfaultfd_copy(struct userfaultf 1684 static int userfaultfd_copy(struct userfaultfd_ctx *ctx, 1695 unsigned long arg 1685 unsigned long arg) 1696 { 1686 { 1697 __s64 ret; 1687 __s64 ret; 1698 struct uffdio_copy uffdio_copy; 1688 struct uffdio_copy uffdio_copy; 1699 struct uffdio_copy __user *user_uffdi 1689 struct uffdio_copy __user *user_uffdio_copy; 1700 struct userfaultfd_wake_range range; 1690 struct userfaultfd_wake_range range; 1701 uffd_flags_t flags = 0; << 1702 1691 1703 user_uffdio_copy = (struct uffdio_cop 1692 user_uffdio_copy = (struct uffdio_copy __user *) arg; 1704 1693 1705 ret = -EAGAIN; 1694 ret = -EAGAIN; 1706 if (atomic_read(&ctx->mmap_changing)) 1695 if (atomic_read(&ctx->mmap_changing)) 1707 goto out; 1696 goto out; 1708 1697 1709 ret = -EFAULT; 1698 ret = -EFAULT; 1710 if (copy_from_user(&uffdio_copy, user 1699 if (copy_from_user(&uffdio_copy, user_uffdio_copy, 1711 /* don't copy "cop 1700 /* don't copy "copy" last field */ 1712 sizeof(uffdio_copy 1701 sizeof(uffdio_copy)-sizeof(__s64))) 1713 goto out; 1702 goto out; 1714 1703 1715 ret = validate_unaligned_range(ctx->m << 1716 uffdio << 1717 if (ret) << 1718 goto out; << 1719 ret = validate_range(ctx->mm, uffdio_ 1704 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); 1720 if (ret) 1705 if (ret) 1721 goto out; 1706 goto out; 1722 !! 1707 /* >> 1708 * double check for wraparound just in case. copy_from_user() >> 1709 * will later check uffdio_copy.src + uffdio_copy.len to fit >> 1710 * in the userland range. >> 1711 */ 1723 ret = -EINVAL; 1712 ret = -EINVAL; >> 1713 if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) >> 1714 goto out; 1724 if (uffdio_copy.mode & ~(UFFDIO_COPY_ 1715 if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) 1725 goto out; 1716 goto out; 1726 if (uffdio_copy.mode & UFFDIO_COPY_MO << 1727 flags |= MFILL_ATOMIC_WP; << 1728 if (mmget_not_zero(ctx->mm)) { 1717 if (mmget_not_zero(ctx->mm)) { 1729 ret = mfill_atomic_copy(ctx, !! 1718 ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, 1730 uffdi !! 1719 uffdio_copy.len, &ctx->mmap_changing, >> 1720 uffdio_copy.mode); 1731 mmput(ctx->mm); 1721 mmput(ctx->mm); 1732 } else { 1722 } else { 1733 return -ESRCH; 1723 return -ESRCH; 1734 } 1724 } 1735 if (unlikely(put_user(ret, &user_uffd 1725 if (unlikely(put_user(ret, &user_uffdio_copy->copy))) 1736 return -EFAULT; 1726 return -EFAULT; 1737 if (ret < 0) 1727 if (ret < 0) 1738 goto out; 1728 goto out; 1739 BUG_ON(!ret); 1729 BUG_ON(!ret); 1740 /* len == 0 would wake all */ 1730 /* len == 0 would wake all */ 1741 range.len = ret; 1731 range.len = ret; 1742 if (!(uffdio_copy.mode & UFFDIO_COPY_ 1732 if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { 1743 range.start = uffdio_copy.dst 1733 range.start = uffdio_copy.dst; 1744 wake_userfault(ctx, &range); 1734 wake_userfault(ctx, &range); 1745 } 1735 } 1746 ret = range.len == uffdio_copy.len ? 1736 ret = range.len == uffdio_copy.len ? 
0 : -EAGAIN; 1747 out: 1737 out: 1748 return ret; 1738 return ret; 1749 } 1739 } 1750 1740 1751 static int userfaultfd_zeropage(struct userfa 1741 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, 1752 unsigned long 1742 unsigned long arg) 1753 { 1743 { 1754 __s64 ret; 1744 __s64 ret; 1755 struct uffdio_zeropage uffdio_zeropag 1745 struct uffdio_zeropage uffdio_zeropage; 1756 struct uffdio_zeropage __user *user_u 1746 struct uffdio_zeropage __user *user_uffdio_zeropage; 1757 struct userfaultfd_wake_range range; 1747 struct userfaultfd_wake_range range; 1758 1748 1759 user_uffdio_zeropage = (struct uffdio 1749 user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; 1760 1750 1761 ret = -EAGAIN; 1751 ret = -EAGAIN; 1762 if (atomic_read(&ctx->mmap_changing)) 1752 if (atomic_read(&ctx->mmap_changing)) 1763 goto out; 1753 goto out; 1764 1754 1765 ret = -EFAULT; 1755 ret = -EFAULT; 1766 if (copy_from_user(&uffdio_zeropage, 1756 if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, 1767 /* don't copy "zer 1757 /* don't copy "zeropage" last field */ 1768 sizeof(uffdio_zero 1758 sizeof(uffdio_zeropage)-sizeof(__s64))) 1769 goto out; 1759 goto out; 1770 1760 1771 ret = validate_range(ctx->mm, uffdio_ 1761 ret = validate_range(ctx->mm, uffdio_zeropage.range.start, 1772 uffdio_zeropage. 1762 uffdio_zeropage.range.len); 1773 if (ret) 1763 if (ret) 1774 goto out; 1764 goto out; 1775 ret = -EINVAL; 1765 ret = -EINVAL; 1776 if (uffdio_zeropage.mode & ~UFFDIO_ZE 1766 if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) 1777 goto out; 1767 goto out; 1778 1768 1779 if (mmget_not_zero(ctx->mm)) { 1769 if (mmget_not_zero(ctx->mm)) { 1780 ret = mfill_atomic_zeropage(c !! 1770 ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, 1781 uf !! 1771 uffdio_zeropage.range.len, >> 1772 &ctx->mmap_changing); 1782 mmput(ctx->mm); 1773 mmput(ctx->mm); 1783 } else { 1774 } else { 1784 return -ESRCH; 1775 return -ESRCH; 1785 } 1776 } 1786 if (unlikely(put_user(ret, &user_uffd 1777 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) 1787 return -EFAULT; 1778 return -EFAULT; 1788 if (ret < 0) 1779 if (ret < 0) 1789 goto out; 1780 goto out; 1790 /* len == 0 would wake all */ 1781 /* len == 0 would wake all */ 1791 BUG_ON(!ret); 1782 BUG_ON(!ret); 1792 range.len = ret; 1783 range.len = ret; 1793 if (!(uffdio_zeropage.mode & UFFDIO_Z 1784 if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { 1794 range.start = uffdio_zeropage 1785 range.start = uffdio_zeropage.range.start; 1795 wake_userfault(ctx, &range); 1786 wake_userfault(ctx, &range); 1796 } 1787 } 1797 ret = range.len == uffdio_zeropage.ra 1788 ret = range.len == uffdio_zeropage.range.len ? 
0 : -EAGAIN; 1798 out: 1789 out: 1799 return ret; 1790 return ret; 1800 } 1791 } 1801 1792 1802 static int userfaultfd_writeprotect(struct us 1793 static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, 1803 unsigned 1794 unsigned long arg) 1804 { 1795 { 1805 int ret; 1796 int ret; 1806 struct uffdio_writeprotect uffdio_wp; 1797 struct uffdio_writeprotect uffdio_wp; 1807 struct uffdio_writeprotect __user *us 1798 struct uffdio_writeprotect __user *user_uffdio_wp; 1808 struct userfaultfd_wake_range range; 1799 struct userfaultfd_wake_range range; 1809 bool mode_wp, mode_dontwake; 1800 bool mode_wp, mode_dontwake; 1810 1801 1811 if (atomic_read(&ctx->mmap_changing)) 1802 if (atomic_read(&ctx->mmap_changing)) 1812 return -EAGAIN; 1803 return -EAGAIN; 1813 1804 1814 user_uffdio_wp = (struct uffdio_write 1805 user_uffdio_wp = (struct uffdio_writeprotect __user *) arg; 1815 1806 1816 if (copy_from_user(&uffdio_wp, user_u 1807 if (copy_from_user(&uffdio_wp, user_uffdio_wp, 1817 sizeof(struct uffd 1808 sizeof(struct uffdio_writeprotect))) 1818 return -EFAULT; 1809 return -EFAULT; 1819 1810 1820 ret = validate_range(ctx->mm, uffdio_ 1811 ret = validate_range(ctx->mm, uffdio_wp.range.start, 1821 uffdio_wp.range. 1812 uffdio_wp.range.len); 1822 if (ret) 1813 if (ret) 1823 return ret; 1814 return ret; 1824 1815 1825 if (uffdio_wp.mode & ~(UFFDIO_WRITEPR 1816 if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE | 1826 UFFDIO_WRITEPR 1817 UFFDIO_WRITEPROTECT_MODE_WP)) 1827 return -EINVAL; 1818 return -EINVAL; 1828 1819 1829 mode_wp = uffdio_wp.mode & UFFDIO_WRI 1820 mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP; 1830 mode_dontwake = uffdio_wp.mode & UFFD 1821 mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE; 1831 1822 1832 if (mode_wp && mode_dontwake) 1823 if (mode_wp && mode_dontwake) 1833 return -EINVAL; 1824 return -EINVAL; 1834 1825 1835 if (mmget_not_zero(ctx->mm)) { 1826 if (mmget_not_zero(ctx->mm)) { 1836 ret = mwriteprotect_range(ctx !! 1827 ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start, 1837 uff !! 
1828 uffdio_wp.range.len, mode_wp, >> 1829 &ctx->mmap_changing); 1838 mmput(ctx->mm); 1830 mmput(ctx->mm); 1839 } else { 1831 } else { 1840 return -ESRCH; 1832 return -ESRCH; 1841 } 1833 } 1842 1834 1843 if (ret) 1835 if (ret) 1844 return ret; 1836 return ret; 1845 1837 1846 if (!mode_wp && !mode_dontwake) { 1838 if (!mode_wp && !mode_dontwake) { 1847 range.start = uffdio_wp.range 1839 range.start = uffdio_wp.range.start; 1848 range.len = uffdio_wp.range.l 1840 range.len = uffdio_wp.range.len; 1849 wake_userfault(ctx, &range); 1841 wake_userfault(ctx, &range); 1850 } 1842 } 1851 return ret; 1843 return ret; 1852 } 1844 } 1853 1845 1854 static int userfaultfd_continue(struct userfa 1846 static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) 1855 { 1847 { 1856 __s64 ret; 1848 __s64 ret; 1857 struct uffdio_continue uffdio_continu 1849 struct uffdio_continue uffdio_continue; 1858 struct uffdio_continue __user *user_u 1850 struct uffdio_continue __user *user_uffdio_continue; 1859 struct userfaultfd_wake_range range; 1851 struct userfaultfd_wake_range range; 1860 uffd_flags_t flags = 0; << 1861 1852 1862 user_uffdio_continue = (struct uffdio 1853 user_uffdio_continue = (struct uffdio_continue __user *)arg; 1863 1854 1864 ret = -EAGAIN; 1855 ret = -EAGAIN; 1865 if (atomic_read(&ctx->mmap_changing)) 1856 if (atomic_read(&ctx->mmap_changing)) 1866 goto out; 1857 goto out; 1867 1858 1868 ret = -EFAULT; 1859 ret = -EFAULT; 1869 if (copy_from_user(&uffdio_continue, 1860 if (copy_from_user(&uffdio_continue, user_uffdio_continue, 1870 /* don't copy the 1861 /* don't copy the output fields */ 1871 sizeof(uffdio_cont 1862 sizeof(uffdio_continue) - (sizeof(__s64)))) 1872 goto out; 1863 goto out; 1873 1864 1874 ret = validate_range(ctx->mm, uffdio_ 1865 ret = validate_range(ctx->mm, uffdio_continue.range.start, 1875 uffdio_continue. 1866 uffdio_continue.range.len); 1876 if (ret) 1867 if (ret) 1877 goto out; 1868 goto out; 1878 1869 1879 ret = -EINVAL; 1870 ret = -EINVAL; 1880 if (uffdio_continue.mode & ~(UFFDIO_C !! 1871 /* double check for wraparound just in case. */ 1881 UFFDIO_C !! 1872 if (uffdio_continue.range.start + uffdio_continue.range.len <= >> 1873 uffdio_continue.range.start) { >> 1874 goto out; >> 1875 } >> 1876 if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE) 1882 goto out; 1877 goto out; 1883 if (uffdio_continue.mode & UFFDIO_CON << 1884 flags |= MFILL_ATOMIC_WP; << 1885 1878 1886 if (mmget_not_zero(ctx->mm)) { 1879 if (mmget_not_zero(ctx->mm)) { 1887 ret = mfill_atomic_continue(c !! 1880 ret = mcopy_continue(ctx->mm, uffdio_continue.range.start, 1888 u !! 
1881 uffdio_continue.range.len, >> 1882 &ctx->mmap_changing); 1889 mmput(ctx->mm); 1883 mmput(ctx->mm); 1890 } else { 1884 } else { 1891 return -ESRCH; 1885 return -ESRCH; 1892 } 1886 } 1893 1887 1894 if (unlikely(put_user(ret, &user_uffd 1888 if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) 1895 return -EFAULT; 1889 return -EFAULT; 1896 if (ret < 0) 1890 if (ret < 0) 1897 goto out; 1891 goto out; 1898 1892 1899 /* len == 0 would wake all */ 1893 /* len == 0 would wake all */ 1900 BUG_ON(!ret); 1894 BUG_ON(!ret); 1901 range.len = ret; 1895 range.len = ret; 1902 if (!(uffdio_continue.mode & UFFDIO_C 1896 if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { 1903 range.start = uffdio_continue 1897 range.start = uffdio_continue.range.start; 1904 wake_userfault(ctx, &range); 1898 wake_userfault(ctx, &range); 1905 } 1899 } 1906 ret = range.len == uffdio_continue.ra 1900 ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN; 1907 1901 1908 out: 1902 out: 1909 return ret; 1903 return ret; 1910 } 1904 } 1911 1905 1912 static inline int userfaultfd_poison(struct u << 1913 { << 1914 __s64 ret; << 1915 struct uffdio_poison uffdio_poison; << 1916 struct uffdio_poison __user *user_uff << 1917 struct userfaultfd_wake_range range; << 1918 << 1919 user_uffdio_poison = (struct uffdio_p << 1920 << 1921 ret = -EAGAIN; << 1922 if (atomic_read(&ctx->mmap_changing)) << 1923 goto out; << 1924 << 1925 ret = -EFAULT; << 1926 if (copy_from_user(&uffdio_poison, us << 1927 /* don't copy the << 1928 sizeof(uffdio_pois << 1929 goto out; << 1930 << 1931 ret = validate_range(ctx->mm, uffdio_ << 1932 uffdio_poison.ra << 1933 if (ret) << 1934 goto out; << 1935 << 1936 ret = -EINVAL; << 1937 if (uffdio_poison.mode & ~UFFDIO_POIS << 1938 goto out; << 1939 << 1940 if (mmget_not_zero(ctx->mm)) { << 1941 ret = mfill_atomic_poison(ctx << 1942 uff << 1943 mmput(ctx->mm); << 1944 } else { << 1945 return -ESRCH; << 1946 } << 1947 << 1948 if (unlikely(put_user(ret, &user_uffd << 1949 return -EFAULT; << 1950 if (ret < 0) << 1951 goto out; << 1952 << 1953 /* len == 0 would wake all */ << 1954 BUG_ON(!ret); << 1955 range.len = ret; << 1956 if (!(uffdio_poison.mode & UFFDIO_POI << 1957 range.start = uffdio_poison.r << 1958 wake_userfault(ctx, &range); << 1959 } << 1960 ret = range.len == uffdio_poison.rang << 1961 << 1962 out: << 1963 return ret; << 1964 } << 1965 << 1966 bool userfaultfd_wp_async(struct vm_area_stru << 1967 { << 1968 return userfaultfd_wp_async_ctx(vma-> << 1969 } << 1970 << 1971 static inline unsigned int uffd_ctx_features( 1906 static inline unsigned int uffd_ctx_features(__u64 user_features) 1972 { 1907 { 1973 /* 1908 /* 1974 * For the current set of features th 1909 * For the current set of features the bits just coincide. Set 1975 * UFFD_FEATURE_INITIALIZED to mark t 1910 * UFFD_FEATURE_INITIALIZED to mark the features as enabled. 
1976 */ 1911 */ 1977 return (unsigned int)user_features | 1912 return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED; 1978 } 1913 } 1979 1914 1980 static int userfaultfd_move(struct userfaultf << 1981 unsigned long arg << 1982 { << 1983 __s64 ret; << 1984 struct uffdio_move uffdio_move; << 1985 struct uffdio_move __user *user_uffdi << 1986 struct userfaultfd_wake_range range; << 1987 struct mm_struct *mm = ctx->mm; << 1988 << 1989 user_uffdio_move = (struct uffdio_mov << 1990 << 1991 if (atomic_read(&ctx->mmap_changing)) << 1992 return -EAGAIN; << 1993 << 1994 if (copy_from_user(&uffdio_move, user << 1995 /* don't copy "mov << 1996 sizeof(uffdio_move << 1997 return -EFAULT; << 1998 << 1999 /* Do not allow cross-mm moves. */ << 2000 if (mm != current->mm) << 2001 return -EINVAL; << 2002 << 2003 ret = validate_range(mm, uffdio_move. << 2004 if (ret) << 2005 return ret; << 2006 << 2007 ret = validate_range(mm, uffdio_move. << 2008 if (ret) << 2009 return ret; << 2010 << 2011 if (uffdio_move.mode & ~(UFFDIO_MOVE_ << 2012 UFFDIO_MOVE << 2013 return -EINVAL; << 2014 << 2015 if (mmget_not_zero(mm)) { << 2016 ret = move_pages(ctx, uffdio_ << 2017 uffdio_move. << 2018 mmput(mm); << 2019 } else { << 2020 return -ESRCH; << 2021 } << 2022 << 2023 if (unlikely(put_user(ret, &user_uffd << 2024 return -EFAULT; << 2025 if (ret < 0) << 2026 goto out; << 2027 << 2028 /* len == 0 would wake all */ << 2029 VM_WARN_ON(!ret); << 2030 range.len = ret; << 2031 if (!(uffdio_move.mode & UFFDIO_MOVE_ << 2032 range.start = uffdio_move.dst << 2033 wake_userfault(ctx, &range); << 2034 } << 2035 ret = range.len == uffdio_move.len ? << 2036 << 2037 out: << 2038 return ret; << 2039 } << 2040 << 2041 /* 1915 /* 2042 * userland asks for a certain API version an 1916 * userland asks for a certain API version and we return which bits 2043 * and ioctl commands are implemented in this 1917 * and ioctl commands are implemented in this kernel for such API 2044 * version or -EINVAL if unknown. 1918 * version or -EINVAL if unknown. 2045 */ 1919 */ 2046 static int userfaultfd_api(struct userfaultfd 1920 static int userfaultfd_api(struct userfaultfd_ctx *ctx, 2047 unsigned long arg) 1921 unsigned long arg) 2048 { 1922 { 2049 struct uffdio_api uffdio_api; 1923 struct uffdio_api uffdio_api; 2050 void __user *buf = (void __user *)arg 1924 void __user *buf = (void __user *)arg; 2051 unsigned int ctx_features; 1925 unsigned int ctx_features; 2052 int ret; 1926 int ret; 2053 __u64 features; 1927 __u64 features; 2054 1928 2055 ret = -EFAULT; 1929 ret = -EFAULT; 2056 if (copy_from_user(&uffdio_api, buf, 1930 if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) 2057 goto out; 1931 goto out; 2058 features = uffdio_api.features; !! 1932 /* Ignore unsupported features (userspace built against newer kernel) */ 2059 ret = -EINVAL; !! 
1933 features = uffdio_api.features & UFFD_API_FEATURES; 2060 if (uffdio_api.api != UFFD_API) << 2061 goto err_out; << 2062 ret = -EPERM; 1934 ret = -EPERM; 2063 if ((features & UFFD_FEATURE_EVENT_FO 1935 if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE)) 2064 goto err_out; 1936 goto err_out; 2065 << 2066 /* WP_ASYNC relies on WP_UNPOPULATED, << 2067 if (features & UFFD_FEATURE_WP_ASYNC) << 2068 features |= UFFD_FEATURE_WP_U << 2069 << 2070 /* report all available features and 1937 /* report all available features and ioctls to userland */ 2071 uffdio_api.features = UFFD_API_FEATUR 1938 uffdio_api.features = UFFD_API_FEATURES; 2072 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 1939 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 2073 uffdio_api.features &= 1940 uffdio_api.features &= 2074 ~(UFFD_FEATURE_MINOR_HUGETLBF 1941 ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM); 2075 #endif 1942 #endif 2076 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 1943 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 2077 uffdio_api.features &= ~UFFD_FEATURE_ 1944 uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP; 2078 #endif 1945 #endif 2079 #ifndef CONFIG_PTE_MARKER_UFFD_WP 1946 #ifndef CONFIG_PTE_MARKER_UFFD_WP 2080 uffdio_api.features &= ~UFFD_FEATURE_ 1947 uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; 2081 uffdio_api.features &= ~UFFD_FEATURE_ << 2082 uffdio_api.features &= ~UFFD_FEATURE_ << 2083 #endif 1948 #endif 2084 << 2085 ret = -EINVAL; << 2086 if (features & ~uffdio_api.features) << 2087 goto err_out; << 2088 << 2089 uffdio_api.ioctls = UFFD_API_IOCTLS; 1949 uffdio_api.ioctls = UFFD_API_IOCTLS; 2090 ret = -EFAULT; 1950 ret = -EFAULT; 2091 if (copy_to_user(buf, &uffdio_api, si 1951 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) 2092 goto out; 1952 goto out; 2093 1953 2094 /* only enable the requested features 1954 /* only enable the requested features for this uffd context */ 2095 ctx_features = uffd_ctx_features(feat 1955 ctx_features = uffd_ctx_features(features); 2096 ret = -EINVAL; 1956 ret = -EINVAL; 2097 if (cmpxchg(&ctx->features, 0, ctx_fe 1957 if (cmpxchg(&ctx->features, 0, ctx_features) != 0) 2098 goto err_out; 1958 goto err_out; 2099 1959 2100 ret = 0; 1960 ret = 0; 2101 out: 1961 out: 2102 return ret; 1962 return ret; 2103 err_out: 1963 err_out: 2104 memset(&uffdio_api, 0, sizeof(uffdio_ 1964 memset(&uffdio_api, 0, sizeof(uffdio_api)); 2105 if (copy_to_user(buf, &uffdio_api, si 1965 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) 2106 ret = -EFAULT; 1966 ret = -EFAULT; 2107 goto out; 1967 goto out; 2108 } 1968 } 2109 1969 2110 static long userfaultfd_ioctl(struct file *fi 1970 static long userfaultfd_ioctl(struct file *file, unsigned cmd, 2111 unsigned long a 1971 unsigned long arg) 2112 { 1972 { 2113 int ret = -EINVAL; 1973 int ret = -EINVAL; 2114 struct userfaultfd_ctx *ctx = file->p 1974 struct userfaultfd_ctx *ctx = file->private_data; 2115 1975 2116 if (cmd != UFFDIO_API && !userfaultfd 1976 if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx)) 2117 return -EINVAL; 1977 return -EINVAL; 2118 1978 2119 switch(cmd) { 1979 switch(cmd) { 2120 case UFFDIO_API: 1980 case UFFDIO_API: 2121 ret = userfaultfd_api(ctx, ar 1981 ret = userfaultfd_api(ctx, arg); 2122 break; 1982 break; 2123 case UFFDIO_REGISTER: 1983 case UFFDIO_REGISTER: 2124 ret = userfaultfd_register(ct 1984 ret = userfaultfd_register(ctx, arg); 2125 break; 1985 break; 2126 case UFFDIO_UNREGISTER: 1986 case UFFDIO_UNREGISTER: 2127 ret = userfaultfd_unregister( 1987 ret = 
userfaultfd_unregister(ctx, arg); 2128 break; 1988 break; 2129 case UFFDIO_WAKE: 1989 case UFFDIO_WAKE: 2130 ret = userfaultfd_wake(ctx, a 1990 ret = userfaultfd_wake(ctx, arg); 2131 break; 1991 break; 2132 case UFFDIO_COPY: 1992 case UFFDIO_COPY: 2133 ret = userfaultfd_copy(ctx, a 1993 ret = userfaultfd_copy(ctx, arg); 2134 break; 1994 break; 2135 case UFFDIO_ZEROPAGE: 1995 case UFFDIO_ZEROPAGE: 2136 ret = userfaultfd_zeropage(ct 1996 ret = userfaultfd_zeropage(ctx, arg); 2137 break; 1997 break; 2138 case UFFDIO_MOVE: << 2139 ret = userfaultfd_move(ctx, a << 2140 break; << 2141 case UFFDIO_WRITEPROTECT: 1998 case UFFDIO_WRITEPROTECT: 2142 ret = userfaultfd_writeprotec 1999 ret = userfaultfd_writeprotect(ctx, arg); 2143 break; 2000 break; 2144 case UFFDIO_CONTINUE: 2001 case UFFDIO_CONTINUE: 2145 ret = userfaultfd_continue(ct 2002 ret = userfaultfd_continue(ctx, arg); 2146 break; 2003 break; 2147 case UFFDIO_POISON: << 2148 ret = userfaultfd_poison(ctx, << 2149 break; << 2150 } 2004 } 2151 return ret; 2005 return ret; 2152 } 2006 } 2153 2007 2154 #ifdef CONFIG_PROC_FS 2008 #ifdef CONFIG_PROC_FS 2155 static void userfaultfd_show_fdinfo(struct se 2009 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) 2156 { 2010 { 2157 struct userfaultfd_ctx *ctx = f->priv 2011 struct userfaultfd_ctx *ctx = f->private_data; 2158 wait_queue_entry_t *wq; 2012 wait_queue_entry_t *wq; 2159 unsigned long pending = 0, total = 0; 2013 unsigned long pending = 0, total = 0; 2160 2014 2161 spin_lock_irq(&ctx->fault_pending_wqh 2015 spin_lock_irq(&ctx->fault_pending_wqh.lock); 2162 list_for_each_entry(wq, &ctx->fault_p 2016 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { 2163 pending++; 2017 pending++; 2164 total++; 2018 total++; 2165 } 2019 } 2166 list_for_each_entry(wq, &ctx->fault_w 2020 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { 2167 total++; 2021 total++; 2168 } 2022 } 2169 spin_unlock_irq(&ctx->fault_pending_w 2023 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 2170 2024 2171 /* 2025 /* 2172 * If more protocols will be added, t 2026 * If more protocols will be added, there will be all shown 2173 * separated by a space. Like this: 2027 * separated by a space. Like this: 2174 * protocols: aa:... bb:... 2028 * protocols: aa:... bb:... 2175 */ 2029 */ 2176 seq_printf(m, "pending:\t%lu\ntotal:\ 2030 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", 2177 pending, total, UFFD_API, 2031 pending, total, UFFD_API, ctx->features, 2178 UFFD_API_IOCTLS|UFFD_API_R 2032 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); 2179 } 2033 } 2180 #endif 2034 #endif 2181 2035 2182 static const struct file_operations userfault 2036 static const struct file_operations userfaultfd_fops = { 2183 #ifdef CONFIG_PROC_FS 2037 #ifdef CONFIG_PROC_FS 2184 .show_fdinfo = userfaultfd_show_fd 2038 .show_fdinfo = userfaultfd_show_fdinfo, 2185 #endif 2039 #endif 2186 .release = userfaultfd_release 2040 .release = userfaultfd_release, 2187 .poll = userfaultfd_poll, 2041 .poll = userfaultfd_poll, 2188 .read_iter = userfaultfd_read_it !! 
2042 .read = userfaultfd_read, 2189 .unlocked_ioctl = userfaultfd_ioctl, 2043 .unlocked_ioctl = userfaultfd_ioctl, 2190 .compat_ioctl = compat_ptr_ioctl, 2044 .compat_ioctl = compat_ptr_ioctl, 2191 .llseek = noop_llseek, 2045 .llseek = noop_llseek, 2192 }; 2046 }; 2193 2047 2194 static void init_once_userfaultfd_ctx(void *m 2048 static void init_once_userfaultfd_ctx(void *mem) 2195 { 2049 { 2196 struct userfaultfd_ctx *ctx = (struct 2050 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; 2197 2051 2198 init_waitqueue_head(&ctx->fault_pendi 2052 init_waitqueue_head(&ctx->fault_pending_wqh); 2199 init_waitqueue_head(&ctx->fault_wqh); 2053 init_waitqueue_head(&ctx->fault_wqh); 2200 init_waitqueue_head(&ctx->event_wqh); 2054 init_waitqueue_head(&ctx->event_wqh); 2201 init_waitqueue_head(&ctx->fd_wqh); 2055 init_waitqueue_head(&ctx->fd_wqh); 2202 seqcount_spinlock_init(&ctx->refile_s 2056 seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); 2203 } 2057 } 2204 2058 2205 static int new_userfaultfd(int flags) !! 2059 SYSCALL_DEFINE1(userfaultfd, int, flags) 2206 { 2060 { 2207 struct userfaultfd_ctx *ctx; 2061 struct userfaultfd_ctx *ctx; 2208 struct file *file; << 2209 int fd; 2062 int fd; 2210 2063 >> 2064 if (!sysctl_unprivileged_userfaultfd && >> 2065 (flags & UFFD_USER_MODE_ONLY) == 0 && >> 2066 !capable(CAP_SYS_PTRACE)) { >> 2067 printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd " >> 2068 "sysctl knob to 1 if kernel faults must be handled " >> 2069 "without obtaining CAP_SYS_PTRACE capability\n"); >> 2070 return -EPERM; >> 2071 } >> 2072 2211 BUG_ON(!current->mm); 2073 BUG_ON(!current->mm); 2212 2074 2213 /* Check the UFFD_* constants for con 2075 /* Check the UFFD_* constants for consistency. */ 2214 BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UF 2076 BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS); 2215 BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXE 2077 BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); 2216 BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBL 2078 BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); 2217 2079 2218 if (flags & ~(UFFD_SHARED_FCNTL_FLAGS 2080 if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY)) 2219 return -EINVAL; 2081 return -EINVAL; 2220 2082 2221 ctx = kmem_cache_alloc(userfaultfd_ct 2083 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); 2222 if (!ctx) 2084 if (!ctx) 2223 return -ENOMEM; 2085 return -ENOMEM; 2224 2086 2225 refcount_set(&ctx->refcount, 1); 2087 refcount_set(&ctx->refcount, 1); 2226 ctx->flags = flags; 2088 ctx->flags = flags; 2227 ctx->features = 0; 2089 ctx->features = 0; 2228 ctx->released = false; 2090 ctx->released = false; 2229 init_rwsem(&ctx->map_changing_lock); << 2230 atomic_set(&ctx->mmap_changing, 0); 2091 atomic_set(&ctx->mmap_changing, 0); 2231 ctx->mm = current->mm; 2092 ctx->mm = current->mm; >> 2093 /* prevent the mm struct to be freed */ >> 2094 mmgrab(ctx->mm); 2232 2095 2233 fd = get_unused_fd_flags(flags & UFFD !! 2096 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx, 2234 if (fd < 0) << 2235 goto err_out; << 2236 << 2237 /* Create a new inode so that the LSM << 2238 file = anon_inode_create_getfile("[us << 2239 O_RDONLY | (flags & U 2097 O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); 2240 if (IS_ERR(file)) { !! 2098 if (fd < 0) { 2241 put_unused_fd(fd); !! 2099 mmdrop(ctx->mm); 2242 fd = PTR_ERR(file); !! 
2100 kmem_cache_free(userfaultfd_ctx_cachep, ctx); 2243 goto err_out; << 2244 } 2101 } 2245 /* prevent the mm struct to be freed << 2246 mmgrab(ctx->mm); << 2247 file->f_mode |= FMODE_NOWAIT; << 2248 fd_install(fd, file); << 2249 return fd; 2102 return fd; 2250 err_out: << 2251 kmem_cache_free(userfaultfd_ctx_cache << 2252 return fd; << 2253 } << 2254 << 2255 static inline bool userfaultfd_syscall_allowe << 2256 { << 2257 /* Userspace-only page faults are alw << 2258 if (flags & UFFD_USER_MODE_ONLY) << 2259 return true; << 2260 << 2261 /* << 2262 * The user is requesting a userfault << 2263 * Privileged users are always allowe << 2264 */ << 2265 if (capable(CAP_SYS_PTRACE)) << 2266 return true; << 2267 << 2268 /* Otherwise, access to kernel fault << 2269 return sysctl_unprivileged_userfaultf << 2270 } << 2271 << 2272 SYSCALL_DEFINE1(userfaultfd, int, flags) << 2273 { << 2274 if (!userfaultfd_syscall_allowed(flag << 2275 return -EPERM; << 2276 << 2277 return new_userfaultfd(flags); << 2278 } 2103 } 2279 2104 2280 static long userfaultfd_dev_ioctl(struct file << 2281 { << 2282 if (cmd != USERFAULTFD_IOC_NEW) << 2283 return -EINVAL; << 2284 << 2285 return new_userfaultfd(flags); << 2286 } << 2287 << 2288 static const struct file_operations userfault << 2289 .unlocked_ioctl = userfaultfd_dev_ioc << 2290 .compat_ioctl = userfaultfd_dev_ioctl << 2291 .owner = THIS_MODULE, << 2292 .llseek = noop_llseek, << 2293 }; << 2294 << 2295 static struct miscdevice userfaultfd_misc = { << 2296 .minor = MISC_DYNAMIC_MINOR, << 2297 .name = "userfaultfd", << 2298 .fops = &userfaultfd_dev_fops << 2299 }; << 2300 << 2301 static int __init userfaultfd_init(void) 2105 static int __init userfaultfd_init(void) 2302 { 2106 { 2303 int ret; << 2304 << 2305 ret = misc_register(&userfaultfd_misc << 2306 if (ret) << 2307 return ret; << 2308 << 2309 userfaultfd_ctx_cachep = kmem_cache_c 2107 userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", 2310 2108 sizeof(struct userfaultfd_ctx), 2311 2109 0, 2312 2110 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2313 2111 init_once_userfaultfd_ctx); 2314 #ifdef CONFIG_SYSCTL << 2315 register_sysctl_init("vm", vm_userfau << 2316 #endif << 2317 return 0; 2112 return 0; 2318 } 2113 } 2319 __initcall(userfaultfd_init); 2114 __initcall(userfaultfd_init); 2320 2115
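For reference, the userspace side of the protocol implemented above, as a minimal sketch: create the descriptor with userfaultfd(2), complete the mandatory UFFDIO_API handshake (enforced by userfaultfd_is_initialized()), register an anonymous mapping for MISSING faults, and resolve each fault from a handler thread with UFFDIO_COPY. Error handling, teardown and feature negotiation are simplified assumptions, not a definitive implementation; build with -pthread.

#define _GNU_SOURCE
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

static long page_size;

/* Handler thread: poll the uffd, read one struct uffd_msg at a time and
 * resolve MISSING faults by copying in a pre-filled source page. */
static void *fault_handler(void *arg)
{
	int uffd = (int)(long)arg;
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
	char *src = malloc(page_size);

	memset(src, 0x5a, page_size);
	for (;;) {
		struct uffd_msg msg;

		/* EPOLLIN is reported while fault_pending_wqh or
		 * event_wqh is non-empty (see userfaultfd_poll()). */
		if (poll(&pfd, 1, -1) < 0)
			break;
		/* Reads shorter than sizeof(msg) get -EINVAL from the
		 * kernel; with O_NONBLOCK a race may yield -EAGAIN. */
		if (read(uffd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
			continue;
		if (msg.event != UFFD_EVENT_PAGEFAULT)
			continue;

		struct uffdio_copy copy = {
			.dst = msg.arg.pagefault.address & ~(page_size - 1),
			.src = (unsigned long)src,
			.len = page_size,
			.mode = 0,	/* no DONTWAKE: wake the faulter */
		};
		if (ioctl(uffd, UFFDIO_COPY, &copy) < 0)
			perror("UFFDIO_COPY");
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;
	page_size = sysconf(_SC_PAGESIZE);
	size_t len = 4 * page_size;

	/* The fd must be O_NONBLOCK for poll() to be usable on it;
	 * userfaultfd_poll() otherwise returns EPOLLERR. */
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) { perror("userfaultfd"); return 1; }

	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	if (ioctl(uffd, UFFDIO_API, &api) < 0) { perror("UFFDIO_API"); return 1; }

	char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) { perror("mmap"); return 1; }

	/* start/len must be page aligned or validate_range() fails. */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = len },
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) { perror("UFFDIO_REGISTER"); return 1; }

	pthread_create(&thr, NULL, fault_handler, (void *)(long)uffd);

	/* First touch of each page blocks in handle_userfault() until the
	 * handler thread's UFFDIO_COPY wakes it. */
	for (size_t off = 0; off < len; off += page_size)
		printf("area[%zu] = %#x\n", off, area[off]);
	return 0;
}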
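The read side batches: userfaultfd_ctx_read() is invoked once per struct uffd_msg, and the outer read loop keeps the descriptor's blocking behaviour only for the first message (no_wait is forced true afterwards). A handler can therefore drain several events per syscall. A sketch; handle_msg() is a hypothetical dispatcher, not part of the kernel API:

#include <sys/types.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

void handle_msg(const struct uffd_msg *msg);	/* hypothetical dispatcher */

static void drain_events(int uffd)
{
	struct uffd_msg msgs[16];
	ssize_t n = read(uffd, msgs, sizeof(msgs));

	/* On success n is a multiple of sizeof(*msgs): only the first
	 * message may block (without O_NONBLOCK); the rest are returned
	 * only if they are already pending. */
	for (ssize_t i = 0; n > 0 && i < n / (ssize_t)sizeof(*msgs); i++)
		handle_msg(&msgs[i]);
}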
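userfaultfd_writeprotect() is also the resolution path: re-issuing the ioctl with the WP mode bit cleared removes the protection and, when DONTWAKE is not set either, wakes anything parked on the range (the !mode_wp && !mode_dontwake branch above). A sketch, assuming the range was registered with UFFDIO_REGISTER_MODE_WP:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Arm or disarm write-protection on a registered range; disarming with
 * neither MODE_WP nor MODE_DONTWAKE set also wakes blocked writers. */
static int set_wp(int uffd, unsigned long addr, unsigned long len, int arm)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = addr, .len = len },
		.mode  = arm ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}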
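The misc device registered in userfaultfd_init() provides a second creation path whose access control is the /dev/userfaultfd node's file permissions rather than the vm.unprivileged_userfaultfd sysctl / CAP_SYS_PTRACE gate in userfaultfd_syscall_allowed(). A sketch of an opener that prefers the device and falls back to the syscall; the flag bits are the same in both cases, and the open mode is an assumption (the ioctl needs no particular access mode):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

static int open_userfaultfd(int flags)
{
	int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);

	if (dev >= 0) {
		/* USERFAULTFD_IOC_NEW returns a fresh uffd or -errno. */
		int uffd = ioctl(dev, USERFAULTFD_IOC_NEW, flags);

		close(dev);
		if (uffd >= 0)
			return uffd;
	}
	return syscall(SYS_userfaultfd, flags);
}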