
TOMOYO Linux Cross Reference
Linux/tools/testing/vma/vma.c


Diff markup

Differences between /tools/testing/vma/vma.c (Version linux-6.12-rc7) and /tools/testing/vma/vma.c (Version linux-5.18.19). This file does not exist in linux-5.18.19, so the entire linux-6.12-rc7 file appears as new; the listing below is the new file's contents.


// SPDX-License-Identifier: GPL-2.0-or-later

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#include "maple-shared.h"
#include "vma_internal.h"

/* Include so header guard set. */
#include "../../../mm/vma.h"

static bool fail_prealloc;

/* Then override vma_iter_prealloc() so we can choose to fail it. */
#define vma_iter_prealloc(vmi, vma)                                     \
        (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
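
/*
 * With this override, a test can force the next preallocation to fail in
 * order to exercise an error path, e.g. (illustrative pattern; cleanup_mm()
 * below resets fail_prealloc between tests):
 *
 *      fail_prealloc = true;
 *      ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
 */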

/*
 * Directly import the VMA implementation here. Our vma_internal.h wrapper
 * provides userland-equivalent functionality for everything vma.c uses.
 */
#include "../../../mm/vma.c"

const struct vm_operations_struct vma_dummy_vm_ops;
static struct anon_vma dummy_anon_vma;

#define ASSERT_TRUE(_expr)                                              \
        do {                                                            \
                if (!(_expr)) {                                         \
                        fprintf(stderr,                                 \
                                "Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
                                __FILE__, __LINE__, __FUNCTION__, #_expr); \
                        return false;                                   \
                }                                                       \
        } while (0)
#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
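
/*
 * Note these helpers 'return false' from the enclosing function on failure,
 * so they are only usable in the bool-returning test functions below, e.g.
 * (illustrative only):
 *
 *      static bool test_something(void)
 *      {
 *              ASSERT_EQ(vma->vm_start, 0);
 *              return true;
 *      }
 */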

static struct task_struct __current;

struct task_struct *get_current(void)
{
        return &__current;
}

/* Helper function to simply allocate a VMA. */
static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end,
                                        pgoff_t pgoff,
                                        vm_flags_t flags)
{
        struct vm_area_struct *ret = vm_area_alloc(mm);

        if (ret == NULL)
                return NULL;

        ret->vm_start = start;
        ret->vm_end = end;
        ret->vm_pgoff = pgoff;
        ret->__vm_flags = flags;

        return ret;
}

/* Helper function to allocate a VMA and link it to the mm. */
static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end,
                                                 pgoff_t pgoff,
                                                 vm_flags_t flags)
{
        struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);

        if (vma == NULL)
                return NULL;

        if (vma_link(mm, vma)) {
                vm_area_free(vma);
                return NULL;
        }

        /*
         * Reset this counter which we use to track whether writes have
         * begun. Linking to the tree will have caused this to be incremented,
         * which means we will get a false positive otherwise.
         */
        vma->vm_lock_seq = -1;

        return vma;
}

/* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
        /*
         * For convenience, get prev and next VMAs, which the new VMA operation
         * requires.
         */
        vmg->next = vma_next(vmg->vmi);
        vmg->prev = vma_prev(vmg->vmi);
        vma_iter_next_range(vmg->vmi);

        return vma_merge_new_range(vmg);
}

/*
 * Helper function which provides a wrapper around a merge existing VMA
 * operation.
 */
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
{
        return vma_merge_existing_range(vmg);
}

/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
        return vma_expand(vmg);
}

/*
 * Helper function to reset merge state and the associated VMA iterator to a
 * specified new range.
 */
static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
                          unsigned long end, pgoff_t pgoff, vm_flags_t flags)
{
        vma_iter_set(vmg->vmi, start);

        vmg->prev = NULL;
        vmg->next = NULL;
        vmg->vma = NULL;

        vmg->start = start;
        vmg->end = end;
        vmg->pgoff = pgoff;
        vmg->flags = flags;
}
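
/*
 * The tests below use this in a common pattern - reset the merge state, then
 * populate only the fields the given merge operation requires, e.g.
 * (illustrative, mirroring usage later in this file):
 *
 *      vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
 *      vmg.prev = vma_prev;
 *      vmg.vma = vma;
 *      ASSERT_EQ(merge_existing(&vmg), vma_prev);
 */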

/*
 * Helper function to try to merge a new VMA.
 *
 * Update vmg and the iterator for it and try to merge, otherwise allocate a
 * new VMA, link it to the maple tree and return it.
 */
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
                                                struct vma_merge_struct *vmg,
                                                unsigned long start,
                                                unsigned long end,
                                                pgoff_t pgoff,
                                                vm_flags_t flags,
                                                bool *was_merged)
{
        struct vm_area_struct *merged;

        vmg_set_range(vmg, start, end, pgoff, flags);

        merged = merge_new(vmg);
        if (merged) {
                *was_merged = true;
                ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
                return merged;
        }

        *was_merged = false;

        ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);

        return alloc_and_link_vma(mm, start, end, pgoff, flags);
}

/*
 * Helper function to reset the dummy anon_vma to indicate it has not been
 * duplicated.
 */
static void reset_dummy_anon_vma(void)
{
        dummy_anon_vma.was_cloned = false;
        dummy_anon_vma.was_unlinked = false;
}

/*
 * Helper function to remove all VMAs and destroy the maple tree associated
 * with a virtual address space. Returns a count of VMAs in the tree.
 */
static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
{
        struct vm_area_struct *vma;
        int count = 0;

        fail_prealloc = false;
        reset_dummy_anon_vma();

        vma_iter_set(vmi, 0);
        for_each_vma(*vmi, vma) {
                vm_area_free(vma);
                count++;
        }

        mtree_destroy(&mm->mm_mt);
        mm->map_count = 0;
        return count;
}

/* Helper function to determine if VMA has had vma_start_write() performed. */
static bool vma_write_started(struct vm_area_struct *vma)
{
        int seq = vma->vm_lock_seq;

        /* We reset after each check. */
        vma->vm_lock_seq = -1;

        /* The vma_start_write() stub simply increments this value. */
        return seq > -1;
}
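
/*
 * Tests assert on this after any operation that must mark the VMA written,
 * e.g. (illustrative):
 *
 *      ASSERT_TRUE(vma_write_started(vma));
 */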

/* Helper function providing a dummy vm_ops->close() method. */
static void dummy_close(struct vm_area_struct *vma)
{
}
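
/*
 * Merge a new VMA into the gap between two existing, compatible VMAs:
 * [0, 0x1000) and [0x2000, 0x3000) plus a new [0x1000, 0x2000) range should
 * merge into a single [0, 0x3000) VMA.
 */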
static bool test_simple_merge(void)
{
        struct vm_area_struct *vma;
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
        struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
        VMA_ITERATOR(vmi, &mm, 0x1000);
        struct vma_merge_struct vmg = {
                .mm = &mm,
                .vmi = &vmi,
                .start = 0x1000,
                .end = 0x2000,
                .flags = flags,
                .pgoff = 1,
        };

        ASSERT_FALSE(vma_link(&mm, vma_left));
        ASSERT_FALSE(vma_link(&mm, vma_right));

        vma = merge_new(&vmg);
        ASSERT_NE(vma, NULL);

        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x3000);
        ASSERT_EQ(vma->vm_pgoff, 0);
        ASSERT_EQ(vma->vm_flags, flags);

        vm_area_free(vma);
        mtree_destroy(&mm.mm_mt);

        return true;
}
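
/*
 * Split a [0, 0x3000) VMA into three via vma_modify_flags(), modifying the
 * middle [0x1000, 0x2000) range, then verify all three resulting VMAs.
 */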
static bool test_simple_modify(void)
{
        struct vm_area_struct *vma;
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
        VMA_ITERATOR(vmi, &mm, 0x1000);

        ASSERT_FALSE(vma_link(&mm, init_vma));

        /*
         * The flags will not be changed, the vma_modify_flags() function
         * performs the merge/split only.
         */
        vma = vma_modify_flags(&vmi, init_vma, init_vma,
                               0x1000, 0x2000, VM_READ | VM_MAYREAD);
        ASSERT_NE(vma, NULL);
        /* We modify the provided VMA, and on split allocate new VMAs. */
        ASSERT_EQ(vma, init_vma);

        ASSERT_EQ(vma->vm_start, 0x1000);
        ASSERT_EQ(vma->vm_end, 0x2000);
        ASSERT_EQ(vma->vm_pgoff, 1);

        /*
         * Now walk through the three split VMAs and make sure they are as
         * expected.
         */

        vma_iter_set(&vmi, 0);
        vma = vma_iter_load(&vmi);

        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x1000);
        ASSERT_EQ(vma->vm_pgoff, 0);

        vm_area_free(vma);
        vma_iter_clear(&vmi);

        vma = vma_next(&vmi);

        ASSERT_EQ(vma->vm_start, 0x1000);
        ASSERT_EQ(vma->vm_end, 0x2000);
        ASSERT_EQ(vma->vm_pgoff, 1);

        vm_area_free(vma);
        vma_iter_clear(&vmi);

        vma = vma_next(&vmi);

        ASSERT_EQ(vma->vm_start, 0x2000);
        ASSERT_EQ(vma->vm_end, 0x3000);
        ASSERT_EQ(vma->vm_pgoff, 2);

        vm_area_free(vma);
        mtree_destroy(&mm.mm_mt);

        return true;
}
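
/* Expand a single [0, 0x1000) VMA in place to [0, 0x3000) via vma_expand(). */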
static bool test_simple_expand(void)
{
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
        VMA_ITERATOR(vmi, &mm, 0);
        struct vma_merge_struct vmg = {
                .vmi = &vmi,
                .vma = vma,
                .start = 0,
                .end = 0x3000,
                .pgoff = 0,
        };

        ASSERT_FALSE(vma_link(&mm, vma));

        ASSERT_FALSE(expand_existing(&vmg));

        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x3000);
        ASSERT_EQ(vma->vm_pgoff, 0);

        vm_area_free(vma);
        mtree_destroy(&mm.mm_mt);

        return true;
}
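
/* Shrink a single [0, 0x3000) VMA in place to [0, 0x1000) via vma_shrink(). */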
static bool test_simple_shrink(void)
{
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
        VMA_ITERATOR(vmi, &mm, 0);

        ASSERT_FALSE(vma_link(&mm, vma));

        ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x1000);
        ASSERT_EQ(vma->vm_pgoff, 0);

        vm_area_free(vma);
        mtree_destroy(&mm.mm_mt);

        return true;
}
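
/*
 * Exercise vma_merge_new_range() across a matrix of cases - no merge, merge
 * to the previous VMA (extend), merge to the next VMA (prepend) and merge to
 * both sides - tracking anon_vma propagation and map_count throughout.
 */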
static bool test_merge_new(void)
{
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        VMA_ITERATOR(vmi, &mm, 0);
        struct vma_merge_struct vmg = {
                .mm = &mm,
                .vmi = &vmi,
        };
        struct anon_vma_chain dummy_anon_vma_chain_a = {
                .anon_vma = &dummy_anon_vma,
        };
        struct anon_vma_chain dummy_anon_vma_chain_b = {
                .anon_vma = &dummy_anon_vma,
        };
        struct anon_vma_chain dummy_anon_vma_chain_c = {
                .anon_vma = &dummy_anon_vma,
        };
        struct anon_vma_chain dummy_anon_vma_chain_d = {
                .anon_vma = &dummy_anon_vma,
        };
        const struct vm_operations_struct vm_ops = {
                .close = dummy_close,
        };
        int count;
        struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
        bool merged;

        /*
         * 0123456789abc
         * AA B       CC
         */
        vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
        ASSERT_NE(vma_a, NULL);
        /* We give each VMA a single avc so we can test anon_vma duplication. */
        INIT_LIST_HEAD(&vma_a->anon_vma_chain);
        list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

        vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
        ASSERT_NE(vma_b, NULL);
        INIT_LIST_HEAD(&vma_b->anon_vma_chain);
        list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

        vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
        ASSERT_NE(vma_c, NULL);
        INIT_LIST_HEAD(&vma_c->anon_vma_chain);
        list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

        /*
         * NO merge.
         *
         * 0123456789abc
         * AA B   **  CC
         */
        vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
        ASSERT_NE(vma_d, NULL);
        INIT_LIST_HEAD(&vma_d->anon_vma_chain);
        list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
        ASSERT_FALSE(merged);
        ASSERT_EQ(mm.map_count, 4);

        /*
         * Merge BOTH sides.
         *
         * 0123456789abc
         * AA*B   DD  CC
         */
        vma_a->vm_ops = &vm_ops; /* This should have no impact. */
        vma_b->anon_vma = &dummy_anon_vma;
        vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
        ASSERT_EQ(vma, vma_a);
        /* Merge with A, delete B. */
        ASSERT_TRUE(merged);
        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x4000);
        ASSERT_EQ(vma->vm_pgoff, 0);
        ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(mm.map_count, 3);

        /*
         * Merge to PREVIOUS VMA.
         *
         * 0123456789abc
         * AAAA*  DD  CC
         */
        vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
        ASSERT_EQ(vma, vma_a);
        /* Extend A. */
        ASSERT_TRUE(merged);
        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x5000);
        ASSERT_EQ(vma->vm_pgoff, 0);
        ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(mm.map_count, 3);

        /*
         * Merge to NEXT VMA.
         *
         * 0123456789abc
         * AAAAA *DD  CC
         */
        vma_d->anon_vma = &dummy_anon_vma;
        vma_d->vm_ops = &vm_ops; /* This should have no impact. */
        vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
        ASSERT_EQ(vma, vma_d);
        /* Prepend. */
        ASSERT_TRUE(merged);
        ASSERT_EQ(vma->vm_start, 0x6000);
        ASSERT_EQ(vma->vm_end, 0x9000);
        ASSERT_EQ(vma->vm_pgoff, 6);
        ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(mm.map_count, 3);

        /*
         * Merge BOTH sides.
         *
         * 0123456789abc
         * AAAAA*DDD  CC
         */
        vma_d->vm_ops = NULL; /* This would otherwise prevent the merge. */
        vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
        ASSERT_EQ(vma, vma_a);
        /* Merge with A, delete D. */
        ASSERT_TRUE(merged);
        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x9000);
        ASSERT_EQ(vma->vm_pgoff, 0);
        ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(mm.map_count, 2);

        /*
         * Merge to NEXT VMA.
         *
         * 0123456789abc
         * AAAAAAAAA *CC
         */
        vma_c->anon_vma = &dummy_anon_vma;
        vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
        ASSERT_EQ(vma, vma_c);
        /* Prepend C. */
        ASSERT_TRUE(merged);
        ASSERT_EQ(vma->vm_start, 0xa000);
        ASSERT_EQ(vma->vm_end, 0xc000);
        ASSERT_EQ(vma->vm_pgoff, 0xa);
        ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(mm.map_count, 2);

        /*
         * Merge BOTH sides.
         *
         * 0123456789abc
         * AAAAAAAAA*CCC
         */
        vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 9, flags, &merged);
        ASSERT_EQ(vma, vma_a);
        /* Extend A and delete C. */
        ASSERT_TRUE(merged);
        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0xc000);
        ASSERT_EQ(vma->vm_pgoff, 0);
        ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(mm.map_count, 1);

        /*
         * Final state.
         *
         * 0123456789abc
         * AAAAAAAAAAAAA
         */

        count = 0;
        vma_iter_set(&vmi, 0);
        for_each_vma(vmi, vma) {
                ASSERT_NE(vma, NULL);
                ASSERT_EQ(vma->vm_start, 0);
                ASSERT_EQ(vma->vm_end, 0xc000);
                ASSERT_EQ(vma->vm_pgoff, 0);
                ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

                vm_area_free(vma);
                count++;
        }

        /* Should only have one VMA left (though freed) after all is done. */
        ASSERT_EQ(count, 1);

        mtree_destroy(&mm.mm_mt);
        return true;
}
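
/*
 * VM_SPECIAL flags (VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP) must
 * prevent the merge of both new and existing VMAs that would otherwise merge.
 */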
static bool test_vma_merge_special_flags(void)
{
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        VMA_ITERATOR(vmi, &mm, 0);
        struct vma_merge_struct vmg = {
                .mm = &mm,
                .vmi = &vmi,
        };
        vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
        vm_flags_t all_special_flags = 0;
        int i;
        struct vm_area_struct *vma_left, *vma;

        /* Make sure there aren't new VM_SPECIAL flags. */
        for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
                all_special_flags |= special_flags[i];
        }
        ASSERT_EQ(all_special_flags, VM_SPECIAL);

        /*
         * 01234
         * AAA
         */
        vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
        ASSERT_NE(vma_left, NULL);

        /* 1. Set up new VMA with special flag that would otherwise merge. */

        /*
         * 01234
         * AAA*
         *
         * This should merge if not for the VM_SPECIAL flag.
         */
        vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
        for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
                vm_flags_t special_flag = special_flags[i];

                vma_left->__vm_flags = flags | special_flag;
                vmg.flags = flags | special_flag;
                vma = merge_new(&vmg);
                ASSERT_EQ(vma, NULL);
                ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
        }

        /* 2. Modify VMA with special flag that would otherwise merge. */

        /*
         * 01234
         * AAAB
         *
         * Create a VMA to modify.
         */
        vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
        ASSERT_NE(vma, NULL);
        vmg.vma = vma;

        for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
                vm_flags_t special_flag = special_flags[i];

                vma_left->__vm_flags = flags | special_flag;
                vmg.flags = flags | special_flag;
                vma = merge_existing(&vmg);
                ASSERT_EQ(vma, NULL);
                ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
        }

        cleanup_mm(&mm, &vmi);
        return true;
}
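
/*
 * A VMA with a vm_ops->close() hook must never be deleted by a merge; merges
 * that would require this must either be rejected outright or reduced to an
 * operation which leaves that VMA in place.
 */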
static bool test_vma_merge_with_close(void)
{
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        VMA_ITERATOR(vmi, &mm, 0);
        struct vma_merge_struct vmg = {
                .mm = &mm,
                .vmi = &vmi,
        };
        const struct vm_operations_struct vm_ops = {
                .close = dummy_close,
        };
        struct vm_area_struct *vma_prev, *vma_next, *vma;

        /*
         * When merging VMAs we are not permitted to remove any VMA that has a
         * vm_ops->close() hook.
         *
         * Considering the two possible adjacent VMAs to which a VMA can be
         * merged:
         *
         * [ prev ][ vma ][ next ]
         *
         * In no case will we need to delete prev. If the operation is
         * mergeable, then prev will be extended with one or both of vma and
         * next deleted.
         *
         * As a result, during initial mergeability checks, only
         * can_vma_merge_before() (which implies the VMA being merged with is
         * 'next' as shown above) bothers to check to see whether the next VMA
         * has a vm_ops->close() callback that will need to be called when
         * removed.
         *
         * If it does, then we cannot merge as the resources that the close()
         * operation potentially clears down are tied only to the existing VMA
         * range and we have no way of extending those to the new VMA range.
         *
         * We must consider two scenarios:
         *
         * A.
         *
         * vm_ops->close:     -       -    !NULL
         *                 [ prev ][ vma ][ next ]
         *
         * Where prev may or may not be present/mergeable.
         *
         * This is picked up by a specific check in can_vma_merge_before().
         *
         * B.
         *
         * vm_ops->close:     -     !NULL
         *                 [ prev ][ vma ]
         *
         * Where prev and vma are present and mergeable.
         *
         * This is picked up by a specific check in the modified VMA merge.
         *
         * IMPORTANT NOTE: We make the assumption that the following case:
         *
         *    -     !NULL   NULL
         * [ prev ][ vma ][ next ]
         *
         * Cannot occur, because vma->vm_ops being the same implies the same
         * vma->vm_file, and therefore this would mean that next->vm_ops
         * would be set too, and thus scenario A would pick this up.
         */

        /*
         * The only case of a new VMA merge that results in a VMA being deleted
         * is one where both the previous and next VMAs are merged - in this
         * instance the next VMA is deleted, and the previous VMA is extended.
         *
         * If we are unable to do so, we reduce the operation to simply
         * extending the prev VMA and not merging next.
         *
         * 0123456789
         * PPP**NNNN
         *             ->
         * 0123456789
         * PPPPPPNNN
         */

        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
        vma_next->vm_ops = &vm_ops;

        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
        ASSERT_EQ(merge_new(&vmg), vma_prev);
        ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
        ASSERT_EQ(vma_prev->vm_start, 0);
        ASSERT_EQ(vma_prev->vm_end, 0x5000);
        ASSERT_EQ(vma_prev->vm_pgoff, 0);

        ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

        /*
         * When modifying an existing VMA there are further cases where we
         * delete VMAs.
         *
         *    <>
         * 0123456789
         * PPPVV
         *
         * In this instance, if vma has a close hook, the merge simply cannot
         * proceed.
         */

        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
        vma->vm_ops = &vm_ops;

        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
        vmg.prev = vma_prev;
        vmg.vma = vma;

        /*
         * The VMA being modified in a way that would otherwise merge should
         * also fail.
         */
        ASSERT_EQ(merge_existing(&vmg), NULL);
        ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

        ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

        /*
         * This case is mirrored if merging with next.
         *
         *    <>
         * 0123456789
         *    VVNNNN
         *
         * In this instance, if vma has a close hook, the merge simply cannot
         * proceed.
         */

        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
        vma->vm_ops = &vm_ops;

        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
        vmg.vma = vma;
        ASSERT_EQ(merge_existing(&vmg), NULL);
        /*
         * Initially this is misapprehended as an out of memory report, as the
         * close() check is handled in the same way as anon_vma duplication
         * failures, however a subsequent patch resolves this.
         */
        ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

        ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

        /*
         * Finally, we consider two variants of the case where we modify a VMA
         * to merge with both the previous and next VMAs.
         *
         * The first variant is where vma has a close hook. In this instance,
         * no merge can proceed.
         *
         *    <>
         * 0123456789
         * PPPVVNNNN
         */

        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
        vma->vm_ops = &vm_ops;

        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
        vmg.prev = vma_prev;
        vmg.vma = vma;

        ASSERT_EQ(merge_existing(&vmg), NULL);
        ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

        ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

        /*
         * The second variant is where next has a vm_ops->close() hook. In
         * this instance, we reduce the operation to a merge between prev and
         * vma.
         *
         *    <>
         * 0123456789
         * PPPVVNNNN
         *            ->
         * 0123456789
         * PPPPPNNNN
         */

        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
        vma_next->vm_ops = &vm_ops;

        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
        vmg.prev = vma_prev;
        vmg.vma = vma;

        ASSERT_EQ(merge_existing(&vmg), vma_prev);
        ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
        ASSERT_EQ(vma_prev->vm_start, 0);
        ASSERT_EQ(vma_prev->vm_end, 0x5000);
        ASSERT_EQ(vma_prev->vm_pgoff, 0);

        ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

        return true;
}
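
/*
 * A new VMA proposed between two close()-bearing VMAs should still partially
 * merge - expanding prev while leaving next intact rather than deleting it.
 */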
static bool test_vma_merge_new_with_close(void)
{
        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
        struct mm_struct mm = {};
        VMA_ITERATOR(vmi, &mm, 0);
        struct vma_merge_struct vmg = {
                .mm = &mm,
                .vmi = &vmi,
        };
        struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
        struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
        const struct vm_operations_struct vm_ops = {
                .close = dummy_close,
        };
        struct vm_area_struct *vma;

        /*
         * We should allow the partial merge of a proposed new VMA if the
         * surrounding VMAs have vm_ops->close() hooks (but are otherwise
         * compatible), e.g.:
         *
         *        New VMA
         *    A  v-------v  B
         * |-----|       |-----|
         *  close         close
         *
         * Since the rule is to not DELETE a VMA with a close operation, this
         * should be permitted, only rather than expanding A and deleting B,
         * we should simply expand A and leave B intact, e.g.:
         *
         *        New VMA
         *       A          B
         * |------------||-----|
         *  close         close
         */

        /* Have prev and next have a vm_ops->close() hook. */
        vma_prev->vm_ops = &vm_ops;
        vma_next->vm_ops = &vm_ops;

        vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
        vma = merge_new(&vmg);
        ASSERT_NE(vma, NULL);
        ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
        ASSERT_EQ(vma->vm_start, 0);
        ASSERT_EQ(vma->vm_end, 0x5000);
        ASSERT_EQ(vma->vm_pgoff, 0);
        ASSERT_EQ(vma->vm_ops, &vm_ops);
        ASSERT_TRUE(vma_write_started(vma));
        ASSERT_EQ(mm.map_count, 2);

        cleanup_mm(&mm, &vmi);
        return true;
}
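
/*
 * Exercise vma_merge_existing_range() - merging an existing VMA left, right
 * or both ways, over partial and full spans, verifying the resulting splits,
 * anon_vma propagation and VMA deletion counts.
 */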
901 static bool test_merge_existing(void)             
902 {                                                 
903         unsigned long flags = VM_READ | VM_WRI    
904         struct mm_struct mm = {};                 
905         VMA_ITERATOR(vmi, &mm, 0);                
906         struct vm_area_struct *vma, *vma_prev,    
907         struct vma_merge_struct vmg = {           
908                 .mm = &mm,                        
909                 .vmi = &vmi,                      
910         };                                        
911         const struct vm_operations_struct vm_o    
912                 .close = dummy_close,             
913         };                                        
914                                                   
915         /*                                        
916          * Merge right case - partial span.       
917          *                                        
918          *    <->                                 
919          * 0123456789                             
920          *   VVVVNNN                              
921          *            ->                          
922          * 0123456789                             
923          *   VNNNNNN                              
924          */                                       
925         vma = alloc_and_link_vma(&mm, 0x2000,     
926         vma->vm_ops = &vm_ops; /* This should     
927         vma_next = alloc_and_link_vma(&mm, 0x6    
928         vma_next->vm_ops = &vm_ops; /* This sh    
929         vmg_set_range(&vmg, 0x3000, 0x6000, 3,    
930         vmg.vma = vma;                            
931         vmg.prev = vma;                           
932         vma->anon_vma = &dummy_anon_vma;          
933         ASSERT_EQ(merge_existing(&vmg), vma_ne    
934         ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS    
935         ASSERT_EQ(vma_next->vm_start, 0x3000);    
936         ASSERT_EQ(vma_next->vm_end, 0x9000);      
937         ASSERT_EQ(vma_next->vm_pgoff, 3);         
938         ASSERT_EQ(vma_next->anon_vma, &dummy_a    
939         ASSERT_EQ(vma->vm_start, 0x2000);         
940         ASSERT_EQ(vma->vm_end, 0x3000);           
941         ASSERT_EQ(vma->vm_pgoff, 2);              
942         ASSERT_TRUE(vma_write_started(vma));      
943         ASSERT_TRUE(vma_write_started(vma_next    
944         ASSERT_EQ(mm.map_count, 2);               
945                                                   
946         /* Clear down and reset. */               
947         ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);      
948                                                   
949         /*                                        
950          * Merge right case - full span.          
951          *                                        
952          *   <-->                                 
953          * 0123456789                             
954          *   VVVVNNN                              
955          *            ->                          
956          * 0123456789                             
957          *   NNNNNNN                              
958          */                                       
959         vma = alloc_and_link_vma(&mm, 0x2000,     
960         vma_next = alloc_and_link_vma(&mm, 0x6    
961         vma_next->vm_ops = &vm_ops; /* This sh    
962         vmg_set_range(&vmg, 0x2000, 0x6000, 2,    
963         vmg.vma = vma;                            
964         vma->anon_vma = &dummy_anon_vma;          
965         ASSERT_EQ(merge_existing(&vmg), vma_ne    
966         ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS    
967         ASSERT_EQ(vma_next->vm_start, 0x2000);    
968         ASSERT_EQ(vma_next->vm_end, 0x9000);      
969         ASSERT_EQ(vma_next->vm_pgoff, 2);         
970         ASSERT_EQ(vma_next->anon_vma, &dummy_a    
971         ASSERT_TRUE(vma_write_started(vma_next    
972         ASSERT_EQ(mm.map_count, 1);               

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 *            ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 *            ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 *             ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
	 *
	 *     -
	 *      -
	 *       -
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */

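	/*
	 * None of the ranges tried below touches either boundary of the V
	 * span [0x3000, 0x8000), so the region being modified can never
	 * become adjacent to prev or next - every attempt must therefore
	 * report VMA_MERGE_NOMERGE.
	 */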
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);

	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}
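
/*
 * A note on the conventions used throughout these tests: addresses are byte
 * values with an assumed 0x1000 (4 KiB) page size, and pgoff is expressed in
 * pages, so a correctly-offset VMA at [0x3000, 0x7000) carries pgoff 3. As an
 * illustrative sketch (not one of the cases above), a neighbour with an
 * incompatible offset would be expected to block the rightward merge while
 * leaving the leftward one intact:
 *
 *	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
 *	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
 *	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 9, flags);
 *	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
 *	vmg.prev = vma_prev;
 *	vmg.vma = vma;
 *	ASSERT_EQ(merge_existing(&vmg), vma_prev);
 *
 * since mergeability checks require vm_pgoff to be consistent with the
 * candidate's position relative to its neighbour.
 */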

static bool test_anon_vma_non_mergeable(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain1 = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain2 = {
		.anon_vma = &dummy_anon_vma,
	};

	/*
	 * In the case of modified VMA merge, merging both left and right VMAs
	 * but where prev and next have incompatible anon_vma objects, we
	 * revert to a merge of prev and VMA:
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 *            ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
	 */

	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
	vma_prev->anon_vma = &dummy_anon_vma;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
	vma_next->anon_vma = (struct anon_vma *)2;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
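
	/*
	 * Note: the bogus pointer value assigned to vma_next->anon_vma above
	 * is only ever compared, never dereferenced - is_mergeable_anon_vma()
	 * tests the pointers for equality, so any distinct non-NULL value
	 * suffices to model an incompatible anon_vma here.
	 */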

	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, adding a new
	 * VMA in a gap between prev and next.
	 *
	 *    <-->
	 * 0123456789
	 * PPP****NNN
	 *            ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	vma_prev->anon_vma = (struct anon_vma *)1;

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	vma_next->anon_vma = (struct anon_vma *)2;

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;

	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
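
/*
 * The two scenarios above exercise different entry points: the modified-VMA
 * case drives merge_existing() with vmg.vma set, while the new-VMA case
 * drives merge_new() from vmg.prev and the range alone - the VMA being added
 * does not exist yet, so there is nothing to assign to vmg.vma.
 */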

static bool test_dup_anon_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA to delete the next one duplicates next's anon_vma
	 * and assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
	vmg.vma = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);
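
	/*
	 * In this userland harness the anon_vma machinery is stubbed out: the
	 * was_cloned and was_unlinked fields on the dummy anon_vma record that
	 * the clone/unlink hooks ran (see the stubs in vma_internal.h), which
	 * is what allows the assertion above to observe the clone directly.
	 */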

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend shrink/delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *     |<----->|
	 * *************-------|
	 *      vma       next
	 * shrink/delete extend
	 */

	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}
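
/*
 * Taken together, the cases above demonstrate the invariant this test is
 * after: whichever VMA survives the merge adopts (a clone of) the anon_vma
 * of the VMA being deleted or shrunk, whether the survivor is prev or next.
 */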

static bool test_vmi_prealloc_fail(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vma_iter_prealloc() to fail and
	 * check that the duplicated anon_vma is unlinked.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
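
	/*
	 * The property being checked: if iterator preallocation fails midway
	 * through a merge, the anon_vma that was already cloned for the
	 * target VMA must be unlinked again on the error path rather than
	 * leaked - hence asserting both was_cloned and was_unlinked.
	 */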

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
	vmg.vma = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_merge_extend(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vm_area_struct *vma;

	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);

	/*
	 * Extend a VMA into the gap between it and the VMA that follows.
	 * This should result in a merge.
	 *
	 * <->
	 * *  *
	 *
	 */

	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	cleanup_mm(&mm, &vmi);
	return true;
}
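
/*
 * The arithmetic behind the merge above: vma spans [0, 0x1000) and the delta
 * passed to vma_merge_extend() is 0x2000, so the extended range [0, 0x3000)
 * exactly abuts the second VMA at 0x3000 and the two fuse into a single VMA
 * spanning [0, 0x4000), leaving map_count at 1.
 */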

static bool test_copy_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	bool need_locks = false;
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_new, *vma_next;

	/* Move backwards and do not merge. */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);

	ASSERT_NE(vma_new, vma);
	ASSERT_EQ(vma_new->vm_start, 0);
	ASSERT_EQ(vma_new->vm_end, 0x2000);
	ASSERT_EQ(vma_new->vm_pgoff, 0);

	cleanup_mm(&mm, &vmi);

	/* Move a VMA into position next to another and merge the two. */

	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);

	ASSERT_EQ(vma_new, vma_next);

	cleanup_mm(&mm, &vmi);
	return true;
}
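
/*
 * In the second case above, the copy destination [0x4000, 0x6000) with
 * pgoff 4 is both adjacent to and offset-compatible with the VMA at
 * [0x6000, 0x8000) (pgoff 6), so copy_vma() merges rather than allocating -
 * which is why vma_new comes back equal to the pre-existing vma_next.
 */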

static bool test_expand_only_mode(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.merge_flags = VMG_FLAG_JUST_EXPAND;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);

	cleanup_mm(&mm, &vmi);
	return true;
}

int main(void)
{
	int num_tests = 0, num_fail = 0;

	maple_tree_init();

#define TEST(name)							\
	do {								\
		num_tests++;						\
		if (!test_##name()) {					\
			num_fail++;					\
			fprintf(stderr, "Test " #name " FAILED\n");	\
		}							\
	} while (0)
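
	/*
	 * TEST(foo) expands to a counted invocation of test_foo(): it bumps
	 * num_tests, and if the test returns false it records the failure
	 * and prints the test's name to stderr.
	 */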

	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(copy_vma);
	TEST(expand_only_mode);

#undef TEST

	printf("%d tests run, %d passed, %d failed.\n",
	       num_tests, num_tests - num_fail, num_fail);

	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}