
TOMOYO Linux Cross Reference
Linux/lib/genalloc.c

Diff markup

Differences between /lib/genalloc.c (Architecture mips) and /lib/genalloc.c (Architecture ppc). The two listings are line-for-line identical, so the file is reproduced once below.


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static inline int
set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val = READ_ONCE(*addr);

	do {
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while (!try_cmpxchg(addr, &val, val | mask_to_set));

	return 0;
}

static inline int
clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val = READ_ONCE(*addr);

	do {
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while (!try_cmpxchg(addr, &val, val & ~mask_to_clear));

	return 0;
}

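/*
 * Editor's note (illustrative, not part of the original file): both
 * helpers operate on a single word.  If two CPUs race to set
 * overlapping masks, try_cmpxchg() forces the loser to re-read *addr,
 * observe the winner's bits and return -EBUSY rather than silently
 * double-allocate: with *addr == 0, CPU A setting 0x0f and CPU B
 * setting 0x3c cannot both succeed.
 */
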
/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one of them will return the number of bits
 * still to be set; otherwise 0 is returned.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one of them will return the number of bits
 * still to be cleared; otherwise 0 is returned.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

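/*
 * Editor's note (illustrative, not part of the original file): a range
 * crossing word boundaries is handled as up to three pieces.  For
 * BITS_PER_LONG == 64, setting nr = 70 bits at start = 60 means:
 *   word 0: BITMAP_FIRST_WORD_MASK(60) (the top 4 bits),
 *   word 1: ~0UL (all 64 bits),
 *   word 2: BITMAP_LAST_WORD_MASK(130) (the low 2 bits).
 * On a conflict the words already set are rolled back by the caller
 * (see the retry path in gen_pool_alloc_algo_owner() below).
 */
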
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

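/*
 * Illustrative sketch (editor's example, not part of the original file):
 * create a pool whose smallest allocation unit is 2^5 = 32 bytes, with
 * no NUMA placement preference.
 */
static int example_create_pool(void)
{
	struct gen_pool *pool;

	pool = gen_pool_create(5, -1);	/* one bitmap bit per 32 bytes */
	if (!pool)
		return -ENOMEM;

	/* ... add backing memory and use the pool ... */

	gen_pool_destroy(pool);	/* only valid once all allocations are freed */
	return 0;
}
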
/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);

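/*
 * Illustrative sketch (editor's example, not part of the original file):
 * most callers use the gen_pool_add() or gen_pool_add_virt() wrappers
 * from <linux/genalloc.h>, which pass a NULL owner.  Here
 * sram_virt/sram_phys/sram_size are assumed to describe a region the
 * caller has already mapped.
 */
static int example_add_sram(struct gen_pool *pool, unsigned long sram_virt,
			    phys_addr_t sram_phys, size_t sram_size)
{
	/* record both the CPU view and the physical view of the region */
	return gen_pool_add_virt(pool, sram_virt, sram_phys, sram_size, -1);
}
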
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_first_bit(chunk->bits, end_bit);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);

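/*
 * Illustrative sketch (editor's example, not part of the original file):
 * typical callers use the gen_pool_alloc()/gen_pool_free() wrappers from
 * <linux/genalloc.h>, which apply the pool's default algorithm
 * (first-fit unless changed via gen_pool_set_algo()).
 */
static void example_alloc_free(struct gen_pool *pool)
{
	unsigned long addr;

	addr = gen_pool_alloc(pool, 256);	/* rounded up to pool granularity */
	if (!addr)
		return;			/* pool exhausted or too fragmented */

	/* ... use the 256 bytes at addr ... */

	gen_pool_free(pool, addr, 256);	/* size must match the allocation */
}
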
/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

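/*
 * Illustrative sketch (editor's example, not part of the original file):
 * allocate a buffer and obtain its DMA-view address in one call.  The
 * address in *dma comes from gen_pool_virt_to_phys(), so it is only
 * meaningful if the chunk was added with its physical address (e.g. via
 * gen_pool_add_virt()).
 */
static void *example_dma_buffer(struct gen_pool *pool, dma_addr_t *dma)
{
	return gen_pool_dma_alloc(pool, 512, dma);	/* NULL on failure */
}
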
/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Can not be used in NMI handler on
 * architectures without NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

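/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a zeroed buffer whose starting address is 64-byte aligned, e.g. for a
 * descriptor ring with an alignment requirement on its base.
 */
static void *example_desc_ring(struct gen_pool *pool, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_align(pool, 4096, dma, 64);
}
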
/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:	the generic memory pool
 * @func:	func to call
 * @data:	additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

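/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a per-chunk callback that tallies the managed size.  The callback runs
 * under rcu_read_lock(), so it must not sleep.  Invoked as:
 *
 *	size_t total = 0;
 *	gen_pool_for_each_chunk(pool, example_count_chunk, &total);
 */
static void example_count_chunk(struct gen_pool *pool,
				struct gen_pool_chunk *chunk, void *data)
{
	size_t *total = data;

	*total += chunk_size(chunk);
}
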
/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool:	the generic memory pool
 * @start:	start address
 * @size:	size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

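/*
 * Illustrative sketch (editor's example, not part of the original file):
 * validate a caller-supplied region before releasing it, instead of
 * letting gen_pool_free() hit BUG() on a foreign address.
 */
static int example_checked_free(struct gen_pool *pool, unsigned long addr,
				size_t size)
{
	if (!gen_pool_has_addr(pool, addr, size))
		return -EINVAL;		/* not (entirely) pool-managed memory */

	gen_pool_free(pool, addr, size);
	return 0;
}
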
574 /**                                               574 /**
575  * gen_pool_avail - get available free space o    575  * gen_pool_avail - get available free space of the pool
576  * @pool: pool to get available free space        576  * @pool: pool to get available free space
577  *                                                577  *
578  * Return available free space of the specifie    578  * Return available free space of the specified pool.
579  */                                               579  */
580 size_t gen_pool_avail(struct gen_pool *pool)      580 size_t gen_pool_avail(struct gen_pool *pool)
581 {                                                 581 {
582         struct gen_pool_chunk *chunk;             582         struct gen_pool_chunk *chunk;
583         size_t avail = 0;                         583         size_t avail = 0;
584                                                   584 
585         rcu_read_lock();                          585         rcu_read_lock();
586         list_for_each_entry_rcu(chunk, &pool->    586         list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
587                 avail += atomic_long_read(&chu    587                 avail += atomic_long_read(&chunk->avail);
588         rcu_read_unlock();                        588         rcu_read_unlock();
589         return avail;                             589         return avail;
590 }                                                 590 }
591 EXPORT_SYMBOL_GPL(gen_pool_avail);                591 EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
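
/*
 * Editor's example (not part of the original file): a hedged sketch using
 * gen_pool_size() as a sanity check after a pool has been populated with
 * gen_pool_add(). The helper name and expected size are hypothetical.
 */
static int __maybe_unused example_verify_pool_size(struct gen_pool *pool,
                                                   size_t expected)
{
        return gen_pool_size(pool) == expected ? 0 : -EINVAL;
}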

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
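
/*
 * Editor's example (not part of the original file): a sketch of creating a
 * pool and switching it to best-fit before any allocations are made; once
 * allocators run concurrently, an algorithm change only takes effect for
 * later allocations. The helper name and order value are hypothetical.
 */
static __maybe_unused struct gen_pool *example_create_best_fit_pool(int nid)
{
        struct gen_pool *pool = gen_pool_create(4, nid); /* 16-byte granules */

        if (pool)
                gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
        return pool;
}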

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
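
/*
 * Editor's example (not part of the original file): first-fit is already the
 * default, so plain gen_pool_alloc() behaves identically; the explicit
 * gen_pool_alloc_algo() form below is shown for symmetry with the other
 * algorithms. The helper name is hypothetical.
 */
static unsigned long __maybe_unused example_first_fit(struct gen_pool *pool,
                                                      size_t size)
{
        return gen_pool_alloc_algo(pool, size, gen_pool_first_fit, NULL);
}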

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start addr of allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_align *alignment;
        unsigned long align_mask, align_off;
        int order;

        alignment = data;
        order = pool->min_alloc_order;
        align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
        align_off = (start_addr & (alignment->align - 1)) >> order;

        return bitmap_find_next_zero_area_off(map, size, start, nr,
                                              align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
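
/*
 * Editor's example (not part of the original file): a sketch of a one-off
 * allocation aligned to 1024 bytes via gen_pool_alloc_algo() and
 * struct genpool_data_align. The helper name and alignment value are
 * hypothetical; the alignment must be a power of two.
 */
static unsigned long __maybe_unused example_aligned_alloc(struct gen_pool *pool,
                                                          size_t size)
{
        struct genpool_data_align align = { .align = 1024 };

        return gen_pool_alloc_algo(pool, size, gen_pool_first_fit_align,
                                   &align);
}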

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed allocation offset (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_fixed *fixed_data;
        int order;
        unsigned long offset_bit;
        unsigned long start_bit;

        fixed_data = data;
        order = pool->min_alloc_order;
        offset_bit = fixed_data->offset >> order;
        if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
                return size;

        start_bit = bitmap_find_next_zero_area(map, size,
                        start + offset_bit, nr, 0);
        if (start_bit != offset_bit)
                start_bit = size;
        return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
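
/*
 * Editor's example (not part of the original file): a sketch of reserving a
 * region at a fixed offset from the start of the pool's bitmap. The offset
 * must be a multiple of 1 << min_alloc_order, or the WARN_ON() above fires
 * and the allocation fails. The helper name and offset are hypothetical.
 */
static unsigned long __maybe_unused example_fixed_alloc(struct gen_pool *pool,
                                                        size_t size)
{
        struct genpool_data_fixed fixed = { .offset = 0x100 };

        return gen_pool_alloc_algo(pool, size, gen_pool_fixed_alloc, &fixed);
}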

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data, struct gen_pool *pool,
                unsigned long start_addr)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
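
/*
 * Editor's example (not part of the original file): with this algorithm a
 * request for nr allocation units lands on a roundup_pow_of_two(nr) boundary,
 * e.g. a 96-unit request is placed at a multiple of 128 units. A hedged
 * sketch with a hypothetical helper name:
 */
static unsigned long __maybe_unused example_order_aligned(struct gen_pool *pool,
                                                          size_t size)
{
        return gen_pool_alloc_algo(pool, size,
                                   gen_pool_first_fit_order_align, NULL);
}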

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * in which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                unsigned long next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
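
/*
 * Editor's example (not part of the original file): best-fit scans the whole
 * bitmap on every allocation, so it is slower than first-fit, but choosing
 * the smallest hole that still fits can reduce fragmentation in long-lived
 * pools. A hedged sketch with a hypothetical helper name:
 */
static unsigned long __maybe_unused example_best_fit(struct gen_pool *pool,
                                                     size_t size)
{
        return gen_pool_alloc_algo(pool, size, gen_pool_best_fit, NULL);
}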

static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
        struct gen_pool **p = res;

        /* NULL data matches only a pool without an assigned name */
        if (!data && !(*p)->name)
                return 1;

        if (!data || !(*p)->name)
                return 0;

        return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
        struct gen_pool **p;

        p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
                        (void *)name);
        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);
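
/*
 * Editor's example (not part of the original file): a sketch of a consumer
 * allocating from a pool that other code attached to the device; the pool
 * name "sram" and the helper name are hypothetical.
 */
static unsigned long __maybe_unused example_alloc_from_dev(struct device *dev,
                                                           size_t size)
{
        struct gen_pool *pool = gen_pool_get(dev, "sram");

        return pool ? gen_pool_alloc(pool, size) : 0;
}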

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                                      int nid, const char *name)
{
        struct gen_pool **ptr, *pool;
        const char *pool_name = NULL;

        /* Check that genpool to be created is uniquely addressed on device */
        if (gen_pool_get(dev, name))
                return ERR_PTR(-EINVAL);

        if (name) {
                pool_name = kstrdup_const(name, GFP_KERNEL);
                if (!pool_name)
                        return ERR_PTR(-ENOMEM);
        }

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                goto free_pool_name;

        pool = gen_pool_create(min_alloc_order, nid);
        if (!pool)
                goto free_devres;

        *ptr = pool;
        pool->name = pool_name;
        devres_add(dev, ptr);

        return pool;

free_devres:
        devres_free(ptr);
free_pool_name:
        kfree_const(pool_name);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
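
/*
 * Editor's example (not part of the original file): a hedged sketch of a
 * probe routine creating a managed pool over a hypothetical on-device SRAM
 * region. No explicit destroy is needed; devres tears the pool down when
 * the driver detaches. All names and values here are made up.
 */
static int __maybe_unused example_probe(struct device *dev,
                                        unsigned long sram_base,
                                        size_t sram_size)
{
        struct gen_pool *pool;

        /* order 5: each bitmap bit represents a 32-byte granule */
        pool = devm_gen_pool_create(dev, 5, NUMA_NO_NODE, "sram");
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        return gen_pool_add(pool, sram_base, sram_size, NUMA_NO_NODE);
}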

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool, *parent;
        const char *name = NULL;
        struct gen_pool *pool = NULL;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;

        pdev = of_find_device_by_node(np_pool);
        if (!pdev) {
                /* Check if named gen_pool is created by parent node device */
                parent = of_get_parent(np_pool);
                pdev = of_find_device_by_node(parent);
                of_node_put(parent);

                of_property_read_string(np_pool, "label", &name);
                if (!name)
                        name = of_node_full_name(np_pool);
        }
        if (pdev)
                pool = gen_pool_get(&pdev->dev, name);
        of_node_put(np_pool);

        return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
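
/*
 * Editor's example (not part of the original file): a sketch of a driver
 * resolving its pool from a device-tree phandle, e.g. a node carrying an
 * "sram = <&mysram>;" property. The property and helper names are
 * hypothetical.
 */
static __maybe_unused struct gen_pool *example_get_dt_pool(struct device *dev)
{
        return dev->of_node ? of_gen_pool_get(dev->of_node, "sram", 0) : NULL;
}
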
#endif /* CONFIG_OF */