TOMOYO Linux Cross Reference
Linux/mm/mlock.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  *      linux/mm/mlock.c
  4  *
  5  *  (C) Copyright 1995 Linus Torvalds
  6  *  (C) Copyright 2002 Christoph Hellwig
  7  */
  8 
  9 #include <linux/capability.h>
 10 #include <linux/mman.h>
 11 #include <linux/mm.h>
 12 #include <linux/sched/user.h>
 13 #include <linux/swap.h>
 14 #include <linux/swapops.h>
 15 #include <linux/pagemap.h>
 16 #include <linux/pagevec.h>
 17 #include <linux/pagewalk.h>
 18 #include <linux/mempolicy.h>
 19 #include <linux/syscalls.h>
 20 #include <linux/sched.h>
 21 #include <linux/export.h>
 22 #include <linux/rmap.h>
 23 #include <linux/mmzone.h>
 24 #include <linux/hugetlb.h>
 25 #include <linux/memcontrol.h>
 26 #include <linux/mm_inline.h>
 27 #include <linux/secretmem.h>
 28 
 29 #include "internal.h"
 30 
 31 struct mlock_fbatch {
 32         local_lock_t lock;
 33         struct folio_batch fbatch;
 34 };
 35 
 36 static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
 37         .lock = INIT_LOCAL_LOCK(lock),
 38 };
 39 
 40 bool can_do_mlock(void)
 41 {
 42         if (rlimit(RLIMIT_MEMLOCK) != 0)
 43                 return true;
 44         if (capable(CAP_IPC_LOCK))
 45                 return true;
 46         return false;
 47 }
 48 EXPORT_SYMBOL(can_do_mlock);
 49 
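can_do_mlock() allows locking when the RLIMIT_MEMLOCK soft limit is non-zero or when the caller has CAP_IPC_LOCK. A minimal user-space sketch of the caller-side view, reading the rlimit before attempting mlock() (illustrative only, not part of this file):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;
        static char buf[1 << 20];       /* 1 MiB we would like to pin */

        if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
                printf("RLIMIT_MEMLOCK soft=%llu hard=%llu\n",
                       (unsigned long long)rl.rlim_cur,
                       (unsigned long long)rl.rlim_max);

        /* Fails with EPERM or ENOMEM when the limit/capability check says no. */
        if (mlock(buf, sizeof(buf)) != 0)
                perror("mlock");
        else
                munlock(buf, sizeof(buf));
        return 0;
}
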
 50 /*
 51  * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 52  * in vmscan and, possibly, the fault path; and to support semi-accurate
 53  * statistics.
 54  *
 55  * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 56  * will be ostensibly placed on the LRU "unevictable" list (actually no such
 57  * list exists), rather than the [in]active lists. PG_unevictable is set to
 58  * indicate the unevictable state.
 59  */
 60 
 61 static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
 62 {
 63         /* There is nothing more we can do while it's off LRU */
 64         if (!folio_test_clear_lru(folio))
 65                 return lruvec;
 66 
 67         lruvec = folio_lruvec_relock_irq(folio, lruvec);
 68 
 69         if (unlikely(folio_evictable(folio))) {
 70                 /*
 71                  * This is a little surprising, but quite possible: PG_mlocked
 72                  * must have got cleared already by another CPU.  Could this
 73                  * folio be unevictable?  I'm not sure, but move it now if so.
 74                  */
 75                 if (folio_test_unevictable(folio)) {
 76                         lruvec_del_folio(lruvec, folio);
 77                         folio_clear_unevictable(folio);
 78                         lruvec_add_folio(lruvec, folio);
 79 
 80                         __count_vm_events(UNEVICTABLE_PGRESCUED,
 81                                           folio_nr_pages(folio));
 82                 }
 83                 goto out;
 84         }
 85 
 86         if (folio_test_unevictable(folio)) {
 87                 if (folio_test_mlocked(folio))
 88                         folio->mlock_count++;
 89                 goto out;
 90         }
 91 
 92         lruvec_del_folio(lruvec, folio);
 93         folio_clear_active(folio);
 94         folio_set_unevictable(folio);
 95         folio->mlock_count = !!folio_test_mlocked(folio);
 96         lruvec_add_folio(lruvec, folio);
 97         __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
 98 out:
 99         folio_set_lru(folio);
100         return lruvec;
101 }
102 
103 static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
104 {
105         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
106 
107         lruvec = folio_lruvec_relock_irq(folio, lruvec);
108 
109         /* As above, this is a little surprising, but possible */
110         if (unlikely(folio_evictable(folio)))
111                 goto out;
112 
113         folio_set_unevictable(folio);
114         folio->mlock_count = !!folio_test_mlocked(folio);
115         __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
116 out:
117         lruvec_add_folio(lruvec, folio);
118         folio_set_lru(folio);
119         return lruvec;
120 }
121 
122 static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
123 {
124         int nr_pages = folio_nr_pages(folio);
125         bool isolated = false;
126 
127         if (!folio_test_clear_lru(folio))
128                 goto munlock;
129 
130         isolated = true;
131         lruvec = folio_lruvec_relock_irq(folio, lruvec);
132 
133         if (folio_test_unevictable(folio)) {
134                 /* Then mlock_count is maintained, but might undercount */
135                 if (folio->mlock_count)
136                         folio->mlock_count--;
137                 if (folio->mlock_count)
138                         goto out;
139         }
140         /* else assume that was the last mlock: reclaim will fix it if not */
141 
142 munlock:
143         if (folio_test_clear_mlocked(folio)) {
144                 __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
145                 if (isolated || !folio_test_unevictable(folio))
146                         __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
147                 else
148                         __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
149         }
150 
151         /* folio_evictable() has to be checked *after* clearing Mlocked */
152         if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
153                 lruvec_del_folio(lruvec, folio);
154                 folio_clear_unevictable(folio);
155                 lruvec_add_folio(lruvec, folio);
156                 __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
157         }
158 out:
159         if (isolated)
160                 folio_set_lru(folio);
161         return lruvec;
162 }
163 
164 /*
165  * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
166  */
167 #define LRU_FOLIO 0x1
168 #define NEW_FOLIO 0x2
169 static inline struct folio *mlock_lru(struct folio *folio)
170 {
171         return (struct folio *)((unsigned long)folio + LRU_FOLIO);
172 }
173 
174 static inline struct folio *mlock_new(struct folio *folio)
175 {
176         return (struct folio *)((unsigned long)folio + NEW_FOLIO);
177 }
178 
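The LRU_FOLIO/NEW_FOLIO flags above travel in the two low bits of the folio pointer itself, which are always zero because struct folio is aligned to well over 4 bytes; mlock_folio_batch() below masks them off again. A standalone user-space sketch of the same low-bit pointer-tagging idea (the struct and names here are illustrative, not kernel code):

#include <assert.h>
#include <stdio.h>

#define TAG_LRU  0x1UL                  /* plays the role of LRU_FOLIO */
#define TAG_NEW  0x2UL                  /* plays the role of NEW_FOLIO */
#define TAG_MASK (TAG_LRU | TAG_NEW)

struct item {                           /* stand-in for struct folio */
        long payload;
};                                      /* at least 8-byte aligned, low bits free */

static void *tag(struct item *p, unsigned long t)
{
        return (void *)((unsigned long)p + t);
}

static struct item *untag(void *tagged, unsigned long *t)
{
        unsigned long v = (unsigned long)tagged;

        *t = v & TAG_MASK;              /* recover the flag bits */
        return (struct item *)(v - *t); /* and the original pointer */
}

int main(void)
{
        struct item it = { .payload = 42 };
        unsigned long t;
        struct item *back = untag(tag(&it, TAG_NEW), &t);

        assert(back == &it && t == TAG_NEW);
        printf("payload=%ld tag=%#lx\n", back->payload, t);
        return 0;
}
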
179 /*
180  * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
181  * make use of such folio pointer flags in future, but for now just keep it for
182  * mlock.  We could use three separate folio batches instead, but one feels
183  * better (munlocking a full folio batch does not need to drain mlocking folio
184  * batches first).
185  */
186 static void mlock_folio_batch(struct folio_batch *fbatch)
187 {
188         struct lruvec *lruvec = NULL;
189         unsigned long mlock;
190         struct folio *folio;
191         int i;
192 
193         for (i = 0; i < folio_batch_count(fbatch); i++) {
194                 folio = fbatch->folios[i];
195                 mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
196                 folio = (struct folio *)((unsigned long)folio - mlock);
197                 fbatch->folios[i] = folio;
198 
199                 if (mlock & LRU_FOLIO)
200                         lruvec = __mlock_folio(folio, lruvec);
201                 else if (mlock & NEW_FOLIO)
202                         lruvec = __mlock_new_folio(folio, lruvec);
203                 else
204                         lruvec = __munlock_folio(folio, lruvec);
205         }
206 
207         if (lruvec)
208                 unlock_page_lruvec_irq(lruvec);
209         folios_put(fbatch);
210 }
211 
212 void mlock_drain_local(void)
213 {
214         struct folio_batch *fbatch;
215 
216         local_lock(&mlock_fbatch.lock);
217         fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
218         if (folio_batch_count(fbatch))
219                 mlock_folio_batch(fbatch);
220         local_unlock(&mlock_fbatch.lock);
221 }
222 
223 void mlock_drain_remote(int cpu)
224 {
225         struct folio_batch *fbatch;
226 
227         WARN_ON_ONCE(cpu_online(cpu));
228         fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
229         if (folio_batch_count(fbatch))
230                 mlock_folio_batch(fbatch);
231 }
232 
233 bool need_mlock_drain(int cpu)
234 {
235         return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
236 }
237 
238 /**
239  * mlock_folio - mlock a folio already on (or temporarily off) LRU
240  * @folio: folio to be mlocked.
241  */
242 void mlock_folio(struct folio *folio)
243 {
244         struct folio_batch *fbatch;
245 
246         local_lock(&mlock_fbatch.lock);
247         fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
248 
249         if (!folio_test_set_mlocked(folio)) {
250                 int nr_pages = folio_nr_pages(folio);
251 
252                 zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
253                 __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
254         }
255 
256         folio_get(folio);
257         if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
258             folio_test_large(folio) || lru_cache_disabled())
259                 mlock_folio_batch(fbatch);
260         local_unlock(&mlock_fbatch.lock);
261 }
262 
263 /**
264  * mlock_new_folio - mlock a newly allocated folio not yet on LRU
265  * @folio: folio to be mlocked, either normal or a THP head.
266  */
267 void mlock_new_folio(struct folio *folio)
268 {
269         struct folio_batch *fbatch;
270         int nr_pages = folio_nr_pages(folio);
271 
272         local_lock(&mlock_fbatch.lock);
273         fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
274         folio_set_mlocked(folio);
275 
276         zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
277         __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
278 
279         folio_get(folio);
280         if (!folio_batch_add(fbatch, mlock_new(folio)) ||
281             folio_test_large(folio) || lru_cache_disabled())
282                 mlock_folio_batch(fbatch);
283         local_unlock(&mlock_fbatch.lock);
284 }
285 
286 /**
287  * munlock_folio - munlock a folio
288  * @folio: folio to be munlocked, either normal or a THP head.
289  */
290 void munlock_folio(struct folio *folio)
291 {
292         struct folio_batch *fbatch;
293 
294         local_lock(&mlock_fbatch.lock);
295         fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
296         /*
297          * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
298          * which will check whether the folio is multiply mlocked.
299          */
300         folio_get(folio);
301         if (!folio_batch_add(fbatch, folio) ||
302             folio_test_large(folio) || lru_cache_disabled())
303                 mlock_folio_batch(fbatch);
304         local_unlock(&mlock_fbatch.lock);
305 }
306 
307 static inline unsigned int folio_mlock_step(struct folio *folio,
308                 pte_t *pte, unsigned long addr, unsigned long end)
309 {
310         const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
311         unsigned int count = (end - addr) >> PAGE_SHIFT;
312         pte_t ptent = ptep_get(pte);
313 
314         if (!folio_test_large(folio))
315                 return 1;
316 
317         return folio_pte_batch(folio, addr, pte, ptent, count, fpb_flags, NULL,
318                                NULL, NULL);
319 }
320 
321 static inline bool allow_mlock_munlock(struct folio *folio,
322                 struct vm_area_struct *vma, unsigned long start,
323                 unsigned long end, unsigned int step)
324 {
325         /*
326          * For munlock, allow munlocking a large folio that is only
327          * partially mapped to the VMA: the folio may have been mlocked
328          * while fully mapped, and the VMA split afterwards.
329          *
330          * Under memory pressure such a large folio can be split, and
331          * the pages that no longer sit in a VM_LOCKED VMA can then be
332          * reclaimed.
333          */
334         if (!(vma->vm_flags & VM_LOCKED))
335                 return true;
336 
337         /* folio_within_range() cannot take KSM, but any small folio is OK */
338         if (!folio_test_large(folio))
339                 return true;
340 
341         /* folio not in range [start, end), skip mlock */
342         if (!folio_within_range(folio, vma, start, end))
343                 return false;
344 
345         /* folio is not fully mapped, skip mlock */
346         if (step != folio_nr_pages(folio))
347                 return false;
348 
349         return true;
350 }
351 
352 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
353                            unsigned long end, struct mm_walk *walk)
354 
355 {
356         struct vm_area_struct *vma = walk->vma;
357         spinlock_t *ptl;
358         pte_t *start_pte, *pte;
359         pte_t ptent;
360         struct folio *folio;
361         unsigned int step = 1;
362         unsigned long start = addr;
363 
364         ptl = pmd_trans_huge_lock(pmd, vma);
365         if (ptl) {
366                 if (!pmd_present(*pmd))
367                         goto out;
368                 if (is_huge_zero_pmd(*pmd))
369                         goto out;
370                 folio = pmd_folio(*pmd);
371                 if (vma->vm_flags & VM_LOCKED)
372                         mlock_folio(folio);
373                 else
374                         munlock_folio(folio);
375                 goto out;
376         }
377 
378         start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
379         if (!start_pte) {
380                 walk->action = ACTION_AGAIN;
381                 return 0;
382         }
383 
384         for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
385                 ptent = ptep_get(pte);
386                 if (!pte_present(ptent))
387                         continue;
388                 folio = vm_normal_folio(vma, addr, ptent);
389                 if (!folio || folio_is_zone_device(folio))
390                         continue;
391 
392                 step = folio_mlock_step(folio, pte, addr, end);
393                 if (!allow_mlock_munlock(folio, vma, start, end, step))
394                         goto next_entry;
395 
396                 if (vma->vm_flags & VM_LOCKED)
397                         mlock_folio(folio);
398                 else
399                         munlock_folio(folio);
400 
401 next_entry:
402                 pte += step - 1;
403                 addr += (step - 1) << PAGE_SHIFT;
404         }
405         pte_unmap(start_pte);
406 out:
407         spin_unlock(ptl);
408         cond_resched();
409         return 0;
410 }
411 
412 /*
413  * mlock_vma_pages_range() - mlock any pages already in the range,
414  *                           or munlock all pages in the range.
415  * @vma - vma containing range to be mlock()ed or munlock()ed
416  * @start - start address in @vma of the range
417  * @end - end of range in @vma
418  * @newflags - the new set of flags for @vma.
419  *
420  * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
421  * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
422  */
423 static void mlock_vma_pages_range(struct vm_area_struct *vma,
424         unsigned long start, unsigned long end, vm_flags_t newflags)
425 {
426         static const struct mm_walk_ops mlock_walk_ops = {
427                 .pmd_entry = mlock_pte_range,
428                 .walk_lock = PGWALK_WRLOCK_VERIFY,
429         };
430 
431         /*
432          * There is a slight chance that concurrent page migration,
433          * or page reclaim finding a page of this now-VM_LOCKED vma,
434          * will call mlock_vma_folio() and raise page's mlock_count:
435          * double counting, leaving the page unevictable indefinitely.
436          * Communicate this danger to mlock_vma_folio() with VM_IO,
437          * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
438          * mmap_lock is held in write mode here, so this weird
439          * combination should not be visible to other mmap_lock users;
440          * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
441          */
442         if (newflags & VM_LOCKED)
443                 newflags |= VM_IO;
444         vma_start_write(vma);
445         vm_flags_reset_once(vma, newflags);
446 
447         lru_add_drain();
448         walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
449         lru_add_drain();
450 
451         if (newflags & VM_IO) {
452                 newflags &= ~VM_IO;
453                 vm_flags_reset_once(vma, newflags);
454         }
455 }
456 
457 /*
458  * mlock_fixup  - handle mlock[all]/munlock[all] requests.
459  *
460  * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
461  * munlock is a no-op.  However, for some special vmas, we go ahead and
462  * populate the ptes.
463  *
464  * For vmas that pass the filters, merge/split as appropriate.
465  */
466 static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
467                struct vm_area_struct **prev, unsigned long start,
468                unsigned long end, vm_flags_t newflags)
469 {
470         struct mm_struct *mm = vma->vm_mm;
471         int nr_pages;
472         int ret = 0;
473         vm_flags_t oldflags = vma->vm_flags;
474 
475         if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
476             is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
477             vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
478                 /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
479                 goto out;
480 
481         vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
482         if (IS_ERR(vma)) {
483                 ret = PTR_ERR(vma);
484                 goto out;
485         }
486 
487         /*
488          * Keep track of amount of locked VM.
489          */
490         nr_pages = (end - start) >> PAGE_SHIFT;
491         if (!(newflags & VM_LOCKED))
492                 nr_pages = -nr_pages;
493         else if (oldflags & VM_LOCKED)
494                 nr_pages = 0;
495         mm->locked_vm += nr_pages;
496 
497         /*
498          * vm_flags is protected by the mmap_lock held in write mode.
499          * It's okay if try_to_unmap_one unmaps a page just after we
500          * set VM_LOCKED, populate_vma_page_range will bring it back.
501          */
502         if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
503                 /* No work to do, and mlocking twice would be wrong */
504                 vma_start_write(vma);
505                 vm_flags_reset(vma, newflags);
506         } else {
507                 mlock_vma_pages_range(vma, start, end, newflags);
508         }
509 out:
510         *prev = vma;
511         return ret;
512 }
513 
514 static int apply_vma_lock_flags(unsigned long start, size_t len,
515                                 vm_flags_t flags)
516 {
517         unsigned long nstart, end, tmp;
518         struct vm_area_struct *vma, *prev;
519         VMA_ITERATOR(vmi, current->mm, start);
520 
521         VM_BUG_ON(offset_in_page(start));
522         VM_BUG_ON(len != PAGE_ALIGN(len));
523         end = start + len;
524         if (end < start)
525                 return -EINVAL;
526         if (end == start)
527                 return 0;
528         vma = vma_iter_load(&vmi);
529         if (!vma)
530                 return -ENOMEM;
531 
532         prev = vma_prev(&vmi);
533         if (start > vma->vm_start)
534                 prev = vma;
535 
536         nstart = start;
537         tmp = vma->vm_start;
538         for_each_vma_range(vmi, vma, end) {
539                 int error;
540                 vm_flags_t newflags;
541 
542                 if (vma->vm_start != tmp)
543                         return -ENOMEM;
544 
545                 newflags = vma->vm_flags & ~VM_LOCKED_MASK;
546                 newflags |= flags;
547                 /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
548                 tmp = vma->vm_end;
549                 if (tmp > end)
550                         tmp = end;
551                 error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
552                 if (error)
553                         return error;
554                 tmp = vma_iter_end(&vmi);
555                 nstart = tmp;
556         }
557 
558         if (tmp < end)
559                 return -ENOMEM;
560 
561         return 0;
562 }
563 
564 /*
565  * Walk the vmas in the range and sum the size of the pages that are
566  * already mlocked.  The deferred-locking case (mlock2 with MLOCK_ONFAULT)
567  * is counted as well.
568  *
569  * Return value: number of previously mlocked pages.
570  */
571 static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
572                 unsigned long start, size_t len)
573 {
574         struct vm_area_struct *vma;
575         unsigned long count = 0;
576         unsigned long end;
577         VMA_ITERATOR(vmi, mm, start);
578 
579         /* Don't overflow past ULONG_MAX */
580         if (unlikely(ULONG_MAX - len < start))
581                 end = ULONG_MAX;
582         else
583                 end = start + len;
584 
585         for_each_vma_range(vmi, vma, end) {
586                 if (vma->vm_flags & VM_LOCKED) {
587                         if (start > vma->vm_start)
588                                 count -= (start - vma->vm_start);
589                         if (end < vma->vm_end) {
590                                 count += end - vma->vm_start;
591                                 break;
592                         }
593                         count += vma->vm_end - vma->vm_start;
594                 }
595         }
596 
597         return count >> PAGE_SHIFT;
598 }
599 
600 /*
601  * convert get_user_pages() return value to posix mlock() error
602  */
603 static int __mlock_posix_error_return(long retval)
604 {
605         if (retval == -EFAULT)
606                 retval = -ENOMEM;
607         else if (retval == -ENOMEM)
608                 retval = -EAGAIN;
609         return retval;
610 }
611 
612 static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
613 {
614         unsigned long locked;
615         unsigned long lock_limit;
616         int error = -ENOMEM;
617 
618         start = untagged_addr(start);
619 
620         if (!can_do_mlock())
621                 return -EPERM;
622 
623         len = PAGE_ALIGN(len + (offset_in_page(start)));
624         start &= PAGE_MASK;
625 
626         lock_limit = rlimit(RLIMIT_MEMLOCK);
627         lock_limit >>= PAGE_SHIFT;
628         locked = len >> PAGE_SHIFT;
629 
630         if (mmap_write_lock_killable(current->mm))
631                 return -EINTR;
632 
633         locked += current->mm->locked_vm;
634         if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
635                 /*
636                  * The requested region may intersect areas that are already
637                  * mlocked; that portion is already accounted in "mm->locked_vm"
638                  * and must not be counted towards the new mlock increment, so
639                  * adjust the locked count accordingly.
640                  */
641                 locked -= count_mm_mlocked_page_nr(current->mm,
642                                 start, len);
643         }
644 
645         /* check against resource limits */
646         if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
647                 error = apply_vma_lock_flags(start, len, flags);
648 
649         mmap_write_unlock(current->mm);
650         if (error)
651                 return error;
652 
653         error = __mm_populate(start, len, 0);
654         if (error)
655                 return __mlock_posix_error_return(error);
656         return 0;
657 }
658 
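do_mlock() above first rounds the request out to whole pages: len is grown by start's offset within its page and then page-aligned, and start is rounded down to a page boundary. A small worked example of that arithmetic with 4 KiB pages (user-space only; the macros are redefined here purely for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)     (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define offset_in_page(p) ((p) & (PAGE_SIZE - 1))

int main(void)
{
        unsigned long start = 0x1000 + 100;     /* 100 bytes into the page at 0x1000 */
        unsigned long len = 5000;               /* range ends at 0x23ec, in the next page */

        len = PAGE_ALIGN(len + offset_in_page(start));  /* 5100 -> 8192, two pages */
        start &= PAGE_MASK;                             /* 0x1064 -> 0x1000 */

        printf("start=%#lx len=%lu (%lu pages)\n", start, len, len / PAGE_SIZE);
        return 0;
}
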
659 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
660 {
661         return do_mlock(start, len, VM_LOCKED);
662 }
663 
664 SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
665 {
666         vm_flags_t vm_flags = VM_LOCKED;
667 
668         if (flags & ~MLOCK_ONFAULT)
669                 return -EINVAL;
670 
671         if (flags & MLOCK_ONFAULT)
672                 vm_flags |= VM_LOCKONFAULT;
673 
674         return do_mlock(start, len, vm_flags);
675 }
676 
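mlock2() accepts only MLOCK_ONFAULT, which turns on VM_LOCKONFAULT so pages are locked lazily as they are faulted in instead of being populated up front by __mm_populate(). A user-space sketch of calling it (this assumes the glibc mlock2() wrapper and MLOCK_ONFAULT from <sys/mman.h>, available with _GNU_SOURCE on glibc 2.27+; otherwise the raw syscall can be used):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 * 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Lock on fault: pages become mlocked only once they are touched. */
        if (mlock2(p, len, MLOCK_ONFAULT) != 0) {
                perror("mlock2");
                return 1;
        }

        p[0] = 1;       /* this first page is now resident and mlocked */

        munlock(p, len);
        munmap(p, len);
        return 0;
}
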
677 SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
678 {
679         int ret;
680 
681         start = untagged_addr(start);
682 
683         len = PAGE_ALIGN(len + (offset_in_page(start)));
684         start &= PAGE_MASK;
685 
686         if (mmap_write_lock_killable(current->mm))
687                 return -EINTR;
688         ret = apply_vma_lock_flags(start, len, 0);
689         mmap_write_unlock(current->mm);
690 
691         return ret;
692 }
693 
694 /*
695  * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
696  * and translate into the appropriate modifications to mm->def_flags and/or the
697  * flags for all current VMAs.
698  *
699  * There are a couple of subtleties with this.  If mlockall() is called multiple
700  * times with different flags, the values do not necessarily stack.  If mlockall
701  * is called once including the MCL_FUTURE flag and then a second time without
702  * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
703  */
704 static int apply_mlockall_flags(int flags)
705 {
706         VMA_ITERATOR(vmi, current->mm, 0);
707         struct vm_area_struct *vma, *prev = NULL;
708         vm_flags_t to_add = 0;
709 
710         current->mm->def_flags &= ~VM_LOCKED_MASK;
711         if (flags & MCL_FUTURE) {
712                 current->mm->def_flags |= VM_LOCKED;
713 
714                 if (flags & MCL_ONFAULT)
715                         current->mm->def_flags |= VM_LOCKONFAULT;
716 
717                 if (!(flags & MCL_CURRENT))
718                         goto out;
719         }
720 
721         if (flags & MCL_CURRENT) {
722                 to_add |= VM_LOCKED;
723                 if (flags & MCL_ONFAULT)
724                         to_add |= VM_LOCKONFAULT;
725         }
726 
727         for_each_vma(vmi, vma) {
728                 vm_flags_t newflags;
729 
730                 newflags = vma->vm_flags & ~VM_LOCKED_MASK;
731                 newflags |= to_add;
732 
733                 /* Ignore errors */
734                 mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
735                             newflags);
736                 cond_resched();
737         }
738 out:
739         return 0;
740 }
741 
742 SYSCALL_DEFINE1(mlockall, int, flags)
743 {
744         unsigned long lock_limit;
745         int ret;
746 
747         if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
748             flags == MCL_ONFAULT)
749                 return -EINVAL;
750 
751         if (!can_do_mlock())
752                 return -EPERM;
753 
754         lock_limit = rlimit(RLIMIT_MEMLOCK);
755         lock_limit >>= PAGE_SHIFT;
756 
757         if (mmap_write_lock_killable(current->mm))
758                 return -EINTR;
759 
760         ret = -ENOMEM;
761         if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
762             capable(CAP_IPC_LOCK))
763                 ret = apply_mlockall_flags(flags);
764         mmap_write_unlock(current->mm);
765         if (!ret && (flags & MCL_CURRENT))
766                 mm_populate(0, TASK_SIZE);
767 
768         return ret;
769 }
770 
771 SYSCALL_DEFINE0(munlockall)
772 {
773         int ret;
774 
775         if (mmap_write_lock_killable(current->mm))
776                 return -EINTR;
777         ret = apply_mlockall_flags(0);
778         mmap_write_unlock(current->mm);
779         return ret;
780 }
781 
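mlockall() combines MCL_CURRENT (lock what is mapped now), MCL_FUTURE (set VM_LOCKED in mm->def_flags so future mappings are locked too) and MCL_ONFAULT; munlockall() clears all of it. A typical user-space use in a latency-sensitive program (illustrative sketch only):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /*
         * Lock everything currently mapped and everything mapped later;
         * needs CAP_IPC_LOCK or a large enough RLIMIT_MEMLOCK.
         */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
                perror("mlockall");
                return 1;
        }

        /* ... latency-sensitive work, no major faults on locked memory ... */

        munlockall();
        return 0;
}
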
782 /*
783  * Objects whose lifetime differs from that of the process (SHM_LOCK and
784  * SHM_HUGETLB shm segments) are accounted against the user's ucounts instead.
785  */
786 static DEFINE_SPINLOCK(shmlock_user_lock);
787 
788 int user_shm_lock(size_t size, struct ucounts *ucounts)
789 {
790         unsigned long lock_limit, locked;
791         long memlock;
792         int allowed = 0;
793 
794         locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
795         lock_limit = rlimit(RLIMIT_MEMLOCK);
796         if (lock_limit != RLIM_INFINITY)
797                 lock_limit >>= PAGE_SHIFT;
798         spin_lock(&shmlock_user_lock);
799         memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
800 
801         if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
802                 dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
803                 goto out;
804         }
805         if (!get_ucounts(ucounts)) {
806                 dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
807                 allowed = 0;
808                 goto out;
809         }
810         allowed = 1;
811 out:
812         spin_unlock(&shmlock_user_lock);
813         return allowed;
814 }
815 
816 void user_shm_unlock(size_t size, struct ucounts *ucounts)
817 {
818         spin_lock(&shmlock_user_lock);
819         dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
820         spin_unlock(&shmlock_user_lock);
821         put_ucounts(ucounts);
822 }
823 
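user_shm_lock() and user_shm_unlock() are the accounting hooks behind SysV shared memory locking: shmctl(SHM_LOCK)/shmctl(SHM_UNLOCK) on a segment end up charging and uncharging the owner's memlock ucounts here. A user-space sketch of driving that path (illustrative only):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

        if (id < 0) {
                perror("shmget");
                return 1;
        }

        /* Charge the 1 MiB segment against the memlock limit (user_shm_lock()). */
        if (shmctl(id, SHM_LOCK, NULL) != 0)
                perror("shmctl(SHM_LOCK)");

        /* Undo the charge (user_shm_unlock()) and remove the segment. */
        shmctl(id, SHM_UNLOCK, NULL);
        shmctl(id, IPC_RMID, NULL);
        return 0;
}
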
