~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/tools/testing/shared/linux.c

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /tools/testing/shared/linux.c (Version linux-6.12-rc7) and /tools/testing/shared/linux.c (Version linux-5.18.19)


  1 // SPDX-License-Identifier: GPL-2.0                 1 
  2 #include <stdlib.h>                               
  3 #include <string.h>                               
  4 #include <malloc.h>                               
  5 #include <pthread.h>                              
  6 #include <unistd.h>                               
  7 #include <assert.h>                               
  8                                                   
  9 #include <linux/gfp.h>                            
 10 #include <linux/poison.h>                         
 11 #include <linux/slab.h>                           
 12 #include <linux/radix-tree.h>                     
 13 #include <urcu/uatomic.h>                         
 14                                                   
 15 int nr_allocated;                                 
 16 int preempt_count;                                
 17 int test_verbose;                                 
 18                                                   
 19 struct kmem_cache {                               
 20         pthread_mutex_t lock;                     
 21         unsigned int size;                        
 22         unsigned int align;                       
 23         int nr_objs;                              
 24         void *objs;                               
 25         void (*ctor)(void *);                     
 26         unsigned int non_kernel;                  
 27         unsigned long nr_allocated;               
 28         unsigned long nr_tallocated;              
 29         bool exec_callback;                       
 30         void (*callback)(void *);                 
 31         void *private;                            
 32 };                                                
 33                                                   
 34 void kmem_cache_set_callback(struct kmem_cache    
 35 {                                                 
 36         cachep->callback = callback;              
 37 }                                                 
 38                                                   
 39 void kmem_cache_set_private(struct kmem_cache     
 40 {                                                 
 41         cachep->private = private;                
 42 }                                                 
 43                                                   
 44 void kmem_cache_set_non_kernel(struct kmem_cac    
 45 {                                                 
 46         cachep->non_kernel = val;                 
 47 }                                                 
 48                                                   
 49 unsigned long kmem_cache_get_alloc(struct kmem    
 50 {                                                 
 51         return cachep->size * cachep->nr_alloc    
 52 }                                                 
 53                                                   
 54 unsigned long kmem_cache_nr_allocated(struct k    
 55 {                                                 
 56         return cachep->nr_allocated;              
 57 }                                                 
 58                                                   
 59 unsigned long kmem_cache_nr_tallocated(struct     
 60 {                                                 
 61         return cachep->nr_tallocated;             
 62 }                                                 
 63                                                   
 64 void kmem_cache_zero_nr_tallocated(struct kmem    
 65 {                                                 
 66         cachep->nr_tallocated = 0;                
 67 }                                                 
 68                                                   
 69 void *kmem_cache_alloc_lru(struct kmem_cache *    
 70                 int gfp)                          
 71 {                                                 
 72         void *p;                                  
 73                                                   
 74         if (cachep->exec_callback) {              
 75                 if (cachep->callback)             
 76                         cachep->callback(cache    
 77                 cachep->exec_callback = false;    
 78         }                                         
 79                                                   
 80         if (!(gfp & __GFP_DIRECT_RECLAIM)) {      
 81                 if (!cachep->non_kernel) {        
 82                         cachep->exec_callback     
 83                         return NULL;              
 84                 }                                 
 85                                                   
 86                 cachep->non_kernel--;             
 87         }                                         
 88                                                   
 89         pthread_mutex_lock(&cachep->lock);        
 90         if (cachep->nr_objs) {                    
 91                 struct radix_tree_node *node =    
 92                 cachep->nr_objs--;                
 93                 cachep->objs = node->parent;      
 94                 pthread_mutex_unlock(&cachep->    
 95                 node->parent = NULL;              
 96                 p = node;                         
 97         } else {                                  
 98                 pthread_mutex_unlock(&cachep->    
 99                 if (cachep->align)                
100                         posix_memalign(&p, cac    
101                 else                              
102                         p = malloc(cachep->siz    
103                 if (cachep->ctor)                 
104                         cachep->ctor(p);          
105                 else if (gfp & __GFP_ZERO)        
106                         memset(p, 0, cachep->s    
107         }                                         
108                                                   
109         uatomic_inc(&cachep->nr_allocated);       
110         uatomic_inc(&nr_allocated);               
111         uatomic_inc(&cachep->nr_tallocated);      
112         if (kmalloc_verbose)                      
113                 printf("Allocating %p from sla    
114         return p;                                 
115 }                                                 
116                                                   
117 void __kmem_cache_free_locked(struct kmem_cach    
118 {                                                 
119         assert(objp);                             
120         if (cachep->nr_objs > 10 || cachep->al    
121                 memset(objp, POISON_FREE, cach    
122                 free(objp);                       
123         } else {                                  
124                 struct radix_tree_node *node =    
125                 cachep->nr_objs++;                
126                 node->parent = cachep->objs;      
127                 cachep->objs = node;              
128         }                                         
129 }                                                 
130                                                   
131 void kmem_cache_free_locked(struct kmem_cache     
132 {                                                 
133         uatomic_dec(&nr_allocated);               
134         uatomic_dec(&cachep->nr_allocated);       
135         if (kmalloc_verbose)                      
136                 printf("Freeing %p to slab\n",    
137         __kmem_cache_free_locked(cachep, objp)    
138 }                                                 
139                                                   
140 void kmem_cache_free(struct kmem_cache *cachep    
141 {                                                 
142         pthread_mutex_lock(&cachep->lock);        
143         kmem_cache_free_locked(cachep, objp);     
144         pthread_mutex_unlock(&cachep->lock);      
145 }                                                 
146                                                   
147 void kmem_cache_free_bulk(struct kmem_cache *c    
148 {                                                 
149         if (kmalloc_verbose)                      
150                 pr_debug("Bulk free %p[0-%lu]\    
151                                                   
152         pthread_mutex_lock(&cachep->lock);        
153         for (int i = 0; i < size; i++)            
154                 kmem_cache_free_locked(cachep,    
155         pthread_mutex_unlock(&cachep->lock);      
156 }                                                 
157                                                   
158 void kmem_cache_shrink(struct kmem_cache *cach    
159 {                                                 
160 }                                                 
161                                                   
162 int kmem_cache_alloc_bulk(struct kmem_cache *c    
163                           void **p)               
164 {                                                 
165         size_t i;                                 
166                                                   
167         if (kmalloc_verbose)                      
168                 pr_debug("Bulk alloc %lu\n", s    
169                                                   
170         pthread_mutex_lock(&cachep->lock);        
171         if (cachep->nr_objs >= size) {            
172                 struct radix_tree_node *node;     
173                                                   
174                 for (i = 0; i < size; i++) {      
175                         if (!(gfp & __GFP_DIRE    
176                                 if (!cachep->n    
177                                         break;    
178                                 cachep->non_ke    
179                         }                         
180                                                   
181                         node = cachep->objs;      
182                         cachep->nr_objs--;        
183                         cachep->objs = node->p    
184                         p[i] = node;              
185                         node->parent = NULL;      
186                 }                                 
187                 pthread_mutex_unlock(&cachep->    
188         } else {                                  
189                 pthread_mutex_unlock(&cachep->    
190                 for (i = 0; i < size; i++) {      
191                         if (!(gfp & __GFP_DIRE    
192                                 if (!cachep->n    
193                                         break;    
194                                 cachep->non_ke    
195                         }                         
196                                                   
197                         if (cachep->align) {      
198                                 posix_memalign    
199                                                   
200                         } else {                  
201                                 p[i] = malloc(    
202                                 if (!p[i])        
203                                         break;    
204                         }                         
205                         if (cachep->ctor)         
206                                 cachep->ctor(p    
207                         else if (gfp & __GFP_Z    
208                                 memset(p[i], 0    
209                 }                                 
210         }                                         
211                                                   
212         if (i < size) {                           
213                 size = i;                         
214                 pthread_mutex_lock(&cachep->lo    
215                 for (i = 0; i < size; i++)        
216                         __kmem_cache_free_lock    
217                 pthread_mutex_unlock(&cachep->    
218                 return 0;                         
219         }                                         
220                                                   
221         for (i = 0; i < size; i++) {              
222                 uatomic_inc(&nr_allocated);       
223                 uatomic_inc(&cachep->nr_alloca    
224                 uatomic_inc(&cachep->nr_talloc    
225                 if (kmalloc_verbose)              
226                         printf("Allocating %p     
227         }                                         
228                                                   
229         return size;                              
230 }                                                 
231                                                   
232 struct kmem_cache *                               
233 kmem_cache_create(const char *name, unsigned i    
234                 unsigned int flags, void (*cto    
235 {                                                 
236         struct kmem_cache *ret = malloc(sizeof    
237                                                   
238         pthread_mutex_init(&ret->lock, NULL);     
239         ret->size = size;                         
240         ret->align = align;                       
241         ret->nr_objs = 0;                         
242         ret->nr_allocated = 0;                    
243         ret->nr_tallocated = 0;                   
244         ret->objs = NULL;                         
245         ret->ctor = ctor;                         
246         ret->non_kernel = 0;                      
247         ret->exec_callback = false;               
248         ret->callback = NULL;                     
249         ret->private = NULL;                      
250         return ret;                               
251 }                                                 
252                                                   
 253 /*                                                
 254  * Test the test infrastructure for kmem_cache_    
 255  */                                               
256 void test_kmem_cache_bulk(void)                   
257 {                                                 
258         int i;                                    
259         void *list[12];                           
260         static struct kmem_cache *test_cache,     
261                                                   
262         /*                                        
263          * Testing the bulk allocators without    
264          * bulk alloc/free to reuse               
265          */                                       
266         test_cache = kmem_cache_create("test_c    
267                                                   
268         for (i = 0; i < 5; i++)                   
269                 list[i] = kmem_cache_alloc(tes    
270                                                   
271         for (i = 0; i < 5; i++)                   
272                 kmem_cache_free(test_cache, li    
273         assert(test_cache->nr_objs == 5);         
274                                                   
275         kmem_cache_alloc_bulk(test_cache, __GF    
276         kmem_cache_free_bulk(test_cache, 5, li    
277                                                   
278         for (i = 0; i < 12 ; i++)                 
279                 list[i] = kmem_cache_alloc(tes    
280                                                   
281         for (i = 0; i < 12; i++)                  
282                 kmem_cache_free(test_cache, li    
283                                                   
284         /* The last free will not be kept arou    
285         assert(test_cache->nr_objs == 11);        
286                                                   
287         /* Aligned caches will immediately fre    
288         test_cache2 = kmem_cache_create("test_    
289                                                   
290         kmem_cache_alloc_bulk(test_cache2, __G    
291         kmem_cache_free_bulk(test_cache2, 10,     
292         assert(!test_cache2->nr_objs);            
293                                                   
294                                                   
295 }                                                 
296                                                   

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php