
TOMOYO Linux Cross Reference
Linux/lib/genalloc.c


Diff markup

Differences between /lib/genalloc.c (Version linux-6.12-rc7) and /lib/genalloc.c (Version ccs-tools-1.8.12). The file does not exist in ccs-tools-1.8.12, so the right-hand side of the diff is empty and the listing below is the complete linux-6.12-rc7 file.


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retrying on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool a lock has to be
 * still taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code using the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

static inline int
set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val = READ_ONCE(*addr);

        do {
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while (!try_cmpxchg(addr, &val, val | mask_to_set));

        return 0;
}

static inline int
clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val = READ_ONCE(*addr);

        do {
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while (!try_cmpxchg(addr, &val, val & ~mask_to_clear));

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one user will return the remaining bits,
 * otherwise 0 is returned.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned long size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr >= bits_to_set) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one user will return the remaining bits,
 * otherwise 0 is returned.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned long size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr >= bits_to_clear) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
                pool->name = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid, void *owner)
{
        struct gen_pool_chunk *chunk;
        unsigned long nbits = size >> pool->min_alloc_order;
        unsigned long nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = vzalloc_node(nbytes, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        chunk->owner = owner;
        atomic_long_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);
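
/*
 * Editor's note: a minimal usage sketch, not part of the kernel source.
 * A driver that owns a dedicated SRAM region might publish it through a
 * pool roughly like this; sram_virt and sram_phys are hypothetical names
 * for an already-mapped region:
 *
 *      pool = gen_pool_create(ilog2(32), -1);   /* 32-byte granules */
 *      if (!pool)
 *              return -ENOMEM;
 *      if (gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
 *                            SZ_64K, -1)) {     /* one 64 KiB chunk */
 *              gen_pool_destroy(pool);
 *              return -ENOMEM;
 *      }
 *
 * gen_pool_add_virt() and gen_pool_add() are inline wrappers around
 * gen_pool_add_owner() declared in <linux/genalloc.h>.
 */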

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        unsigned long bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_first_bit(chunk->bits, end_bit);
                BUG_ON(bit < end_bit);

                vfree(chunk);
        }
        kfree_const(pool->name);
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
                genpool_algo_t algo, void *data, void **owner)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_long_read(&chunk->avail))
                        continue;

                start_bit = 0;
                end_bit = chunk_size(chunk) >> order;
retry:
                start_bit = algo(chunk->bits, end_bit, start_bit,
                                 nbits, data, pool, chunk->start_addr);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_long_sub(size, &chunk->avail);
                if (owner)
                        *owner = chunk->owner;
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
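
/*
 * Editor's note: an allocation/free sketch, not part of the kernel source.
 * gen_pool_alloc() and gen_pool_free() in <linux/genalloc.h> funnel into
 * gen_pool_alloc_algo_owner() above and gen_pool_free_owner() below:
 *
 *      unsigned long addr = gen_pool_alloc(pool, 256);
 *
 *      if (!addr)
 *              return -ENOMEM;  /* pool exhausted or too fragmented */
 *      ...
 *      gen_pool_free(pool, addr, 256);  /* size must match the allocation */
 */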

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
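
/*
 * Editor's note: a DMA-style usage sketch, not part of the kernel source,
 * assuming the pool was populated with gen_pool_add_virt() so that the
 * virtual-to-physical translation is meaningful:
 *
 *      dma_addr_t dma;
 *      void *buf = gen_pool_dma_alloc(pool, 512, &dma);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      /* program the device with "dma"; use "buf" from the CPU side */
 */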

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.  Uses the
 * given pool allocation function.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        unsigned long vaddr;

        if (!pool)
                return NULL;

        vaddr = gen_pool_alloc_algo(pool, size, algo, data);
        if (!vaddr)
                return NULL;

        if (dma)
                *dma = gen_pool_virt_to_phys(pool, vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_alloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool using
 * the given pool allocation function.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

        if (vaddr)
                memset(vaddr, 0, size);

        return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction.  Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_zalloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
                void **owner)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_long_add(size, &chunk->avail);
                        if (owner)
                                *owner = chunk->owner;
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);
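
/*
 * Editor's note: an owner-tracking sketch, not part of the kernel source.
 * The *_owner variants let whoever published a chunk stash a cookie at
 * gen_pool_add_owner() time and retrieve it per allocation:
 *
 *      void *owner;
 *      unsigned long addr = gen_pool_alloc_owner(pool, 128, &owner);
 *
 *      ...
 *      gen_pool_free_owner(pool, addr, 128, &owner);
 */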

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool:       the generic memory pool
 * @start:      start address
 * @size:       size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
                        size_t size)
{
        bool found = false;
        unsigned long end = start + size - 1;
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
                if (start >= chunk->start_addr && start <= chunk->end_addr) {
                        if (end <= chunk->end_addr) {
                                found = true;
                                break;
                        }
                }
        }
        rcu_read_unlock();
        return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);
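
/*
 * Editor's note: a sketch, not part of the kernel source, of guarding a
 * free with a containment check, e.g. when an address may have come from
 * a different allocator:
 *
 *      if (gen_pool_has_addr(pool, addr, len))
 *              gen_pool_free(pool, addr, len);
 */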

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_long_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get the size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
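
/*
 * Editor's note: an accounting sketch, not part of the kernel source.
 * Both helpers return byte counts; the in-use amount is their difference:
 *
 *      size_t total = gen_pool_size(pool);
 *      size_t used = total - gen_pool_avail(pool);
 *
 *      pr_debug("pool: %zu of %zu bytes used\n", used, total);
 */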

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
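
/*
 * Editor's note: a sketch, not part of the kernel source, switching the
 * pool's default policy to best-fit; subsequent gen_pool_alloc() calls
 * then use gen_pool_best_fit() defined below:
 *
 *      gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */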

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start addr of allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_align *alignment;
        unsigned long align_mask, align_off;
        int order;

        alignment = data;
        order = pool->min_alloc_order;
        align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
        align_off = (start_addr & (alignment->align - 1)) >> order;

        return bitmap_find_next_zero_area_off(map, size, start, nr,
                                              align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
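
/*
 * Editor's note: a per-call alignment sketch, not part of the kernel
 * source.  The align member is in bytes and should be a power of two:
 *
 *      struct genpool_data_align align_data = { .align = 64 };
 *      unsigned long addr = gen_pool_alloc_algo(pool, 256,
 *                              gen_pool_first_fit_align, &align_data);
 */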

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_fixed *fixed_data;
        int order;
        unsigned long offset_bit;
        unsigned long start_bit;

        fixed_data = data;
        order = pool->min_alloc_order;
        offset_bit = fixed_data->offset >> order;
        if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
                return size;

        start_bit = bitmap_find_next_zero_area(map, size,
                        start + offset_bit, nr, 0);
        if (start_bit != offset_bit)
                start_bit = size;
        return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
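
/*
 * Editor's note: a fixed-offset sketch, not part of the kernel source.
 * The offset is relative to the start of the chunk and must be a multiple
 * of the pool's minimum allocation size (see the WARN_ON above):
 *
 *      struct genpool_data_fixed fixed_data = { .offset = 0x1000 };
 *      unsigned long addr = gen_pool_alloc_algo(pool, 256,
 *                              gen_pool_fixed_alloc, &fixed_data);
 */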

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data, struct gen_pool *pool,
                unsigned long start_addr)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                unsigned long next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
        struct gen_pool **p = res;

        /* NULL data matches only a pool without an assigned name */
        if (!data && !(*p)->name)
                return 1;

        if (!data || !(*p)->name)
                return 0;

        return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
        struct gen_pool **p;

        p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
                        (void *)name);
        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                                      int nid, const char *name)
{
        struct gen_pool **ptr, *pool;
        const char *pool_name = NULL;

        /* Check that genpool to be created is uniquely addressed on device */
        if (gen_pool_get(dev, name))
                return ERR_PTR(-EINVAL);

        if (name) {
                pool_name = kstrdup_const(name, GFP_KERNEL);
                if (!pool_name)
                        return ERR_PTR(-ENOMEM);
        }

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                goto free_pool_name;

        pool = gen_pool_create(min_alloc_order, nid);
        if (!pool)
                goto free_devres;

        *ptr = pool;
        pool->name = pool_name;
        devres_add(dev, ptr);

        return pool;

free_devres:
        devres_free(ptr);
free_pool_name:
        kfree_const(pool_name);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
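
/*
 * Editor's note: a managed-lifetime sketch, not part of the kernel source,
 * as it might appear in a driver's probe() routine; "uc" is a hypothetical
 * pool name:
 *
 *      struct gen_pool *pool;
 *
 *      pool = devm_gen_pool_create(dev, ilog2(64), NUMA_NO_NODE, "uc");
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 *      /* no explicit gen_pool_destroy(): devres tears the pool down */
 */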

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool, *parent;
        const char *name = NULL;
        struct gen_pool *pool = NULL;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;

        pdev = of_find_device_by_node(np_pool);
        if (!pdev) {
                /* Check if named gen_pool is created by parent node device */
                parent = of_get_parent(np_pool);
                pdev = of_find_device_by_node(parent);
                of_node_put(parent);

                of_property_read_string(np_pool, "label", &name);
                if (!name)
                        name = of_node_full_name(np_pool);
        }
        if (pdev)
                pool = gen_pool_get(&pdev->dev, name);
        of_node_put(np_pool);

        return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
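
/*
 * Editor's note: a device-tree sketch, not part of the kernel source.
 * Given a hypothetical consumer node carrying a phandle property such as
 *
 *      sram-pool = <&sram>;
 *
 * a driver can look the pool up by property name and index:
 *
 *      struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram-pool", 0);
 *
 *      if (!pool)
 *              return -EPROBE_DEFER;
 */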
