// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

#ifdef CONFIG_SLUB_DEBUG_ON
static void poison_error(mempool_t *pool, void *element, size_t size,
                         size_t byte)
{
        const int nr = pool->curr_nr;
        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
        const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
        int i;

        pr_err("BUG: mempool element poison mismatch\n");
        pr_err("Mempool %p size %zu\n", pool, size);
        pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
        for (i = start; i < end; i++)
                pr_cont("%x ", *(u8 *)(element + i));
        pr_cont("%s\n", end < size ? "..." : "");
        dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
        u8 *obj = element;
        size_t i;

        for (i = 0; i < size; i++) {
                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp) {
                        poison_error(pool, element, size, i);
                        return;
                }
        }
        memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
        /* Skip checking: KASAN might save its metadata in the element. */
        if (kasan_enabled())
                return;

        /* Mempools backed by slab allocator */
        if (pool->free == mempool_kfree) {
                __check_element(pool, element, (size_t)pool->pool_data);
        } else if (pool->free == mempool_free_slab) {
                __check_element(pool, element, kmem_cache_size(pool->pool_data));
        } else if (pool->free == mempool_free_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_local_page((struct page *)element);

                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
                kunmap_local(addr);
        }
}

static void __poison_element(void *element, size_t size)
{
        u8 *obj = element;

        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
        /* Skip poisoning: KASAN might save its metadata in the element. */
        if (kasan_enabled())
                return;

        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_kmalloc) {
                __poison_element(element, (size_t)pool->pool_data);
        } else if (pool->alloc == mempool_alloc_slab) {
                __poison_element(element, kmem_cache_size(pool->pool_data));
        } else if (pool->alloc == mempool_alloc_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_local_page((struct page *)element);

                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
                kunmap_local(addr);
        }
}
#else /* CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_SLUB_DEBUG_ON */
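
/*
 * Illustrative note (not part of the original file; poison values are the
 * ones defined in include/linux/poison.h): with the debug checks above
 * enabled, a free element of size N sits in the pool as
 *
 *	byte 0 .. N-2:	POISON_FREE (0x6b)
 *	byte N-1:	POISON_END  (0xa5)
 *
 * and __check_element() repaints it with POISON_INUSE (0x5a) on removal,
 * so any write to an element while it was parked in the pool shows up as
 * a poison mismatch report from poison_error().
 */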

static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                return kasan_mempool_poison_object(element);
        else if (pool->alloc == mempool_alloc_pages)
                return kasan_mempool_poison_pages(element,
                                                (unsigned long)pool->pool_data);
        return true;
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_kmalloc)
                kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
        else if (pool->alloc == mempool_alloc_slab)
                kasan_mempool_unpoison_object(element,
                                              kmem_cache_size(pool->pool_data));
        else if (pool->alloc == mempool_alloc_pages)
                kasan_mempool_unpoison_pages(element,
                                             (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
        if (kasan_poison_element(pool, element))
                pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
        void *element = pool->elements[--pool->curr_nr];

        BUG_ON(pool->curr_nr < 0);
        kasan_unpoison_element(pool, element);
        check_element(pool, element);
        return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool, as well as the internal elements
 * array; the pool structure itself is not freed.  This function only
 * sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
        while (pool->curr_nr) {
                void *element = remove_element(pool);

                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
        if (unlikely(!pool))
                return;

        mempool_exit(pool);
        kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                      mempool_free_t *free_fn, void *pool_data,
                      gfp_t gfp_mask, int node_id)
{
        spin_lock_init(&pool->lock);
        pool->min_nr    = min_nr;
        pool->pool_data = pool_data;
        pool->alloc     = alloc_fn;
        pool->free      = free_fn;
        init_waitqueue_head(&pool->wait);

        pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
                                            gfp_mask, node_id);
        if (!pool->elements)
                return -ENOMEM;

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

                element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_exit(pool);
                        return -ENOMEM;
                }
                add_element(pool, element);
        }

        return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                        mempool_free_t *free_fn, void *pool_data)
{
        return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                                 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init_noprof);
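
/*
 * Usage sketch (illustrative, not part of the kernel API docs): a driver
 * that embeds its pool pairs mempool_init() with mempool_exit().  The
 * structure, cache name and reserve size below are hypothetical.
 *
 *	struct my_dev {
 *		struct kmem_cache *cache;
 *		mempool_t pool;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		int ret;
 *
 *		dev->cache = kmem_cache_create("my_objs",
 *				sizeof(struct my_obj), 0, 0, NULL);
 *		if (!dev->cache)
 *			return -ENOMEM;
 *
 *		ret = mempool_init(&dev->pool, 16, mempool_alloc_slab,
 *				   mempool_free_slab, dev->cache);
 *		if (ret)
 *			kmem_cache_destroy(dev->cache);
 *		return ret;
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		mempool_exit(&dev->pool);
 *		kmem_cache_destroy(dev->cache);
 *	}
 */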

/**
 * mempool_create_node - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 * @gfp_mask:  memory allocation flags
 * @node_id:   numa node to allocate on
 *
 * This function creates and preallocates a guaranteed-size memory pool.
 * The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the
 * free_fn() callbacks might sleep as well, as long as mempool_alloc() is
 * never called from IRQ context.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
                                      mempool_free_t *free_fn, void *pool_data,
                                      gfp_t gfp_mask, int node_id)
{
        mempool_t *pool;

        pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
        if (!pool)
                return NULL;

        if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
                              gfp_mask, node_id)) {
                kfree(pool);
                return NULL;
        }

        return pool;
}
EXPORT_SYMBOL(mempool_create_node_noprof);
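
/*
 * Usage sketch (illustrative): callers normally reach this through the
 * mempool_create() wrapper from <linux/mempool.h>; my_cache and MIN_IOS
 * are hypothetical placeholders.
 *
 *	mempool_t *pool;
 *
 *	pool = mempool_create(MIN_IOS, mempool_alloc_slab,
 *			      mempool_free_slab, my_cache);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(pool);
 */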

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note that the caller must guarantee that no mempool_destroy() call
 * runs while this function executes. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);
        might_sleep();

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                                     GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
                        pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);   /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);
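
/*
 * Usage sketch (illustrative): growing the reserve when the expected number
 * of in-flight requests rises.  A failed grow can often be tolerated, since
 * the pool keeps its previous reserve when -ENOMEM is returned; new_depth
 * is a hypothetical value.
 *
 *	if (mempool_resize(pool, new_depth))
 *		pr_warn("mempool kept its old reserve of %d elements\n",
 *			pool->min_nr);
 */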

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_entry_t wait;
        gfp_t gfp_temp;

        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
        might_alloc(gfp_mask);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(element);
                return element;
        }

        /*
         * We use the gfp mask without direct reclaim or IO for the first
         * round.  If the allocation failed with that and @pool was empty,
         * retry immediately.
         */
        if (gfp_temp != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }

        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
        }

        /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

        spin_unlock_irqrestore(&pool->lock, flags);

        /*
         * FIXME: this should be io_schedule().  The timeout is there as a
         * workaround for some DM problems in 2.6.18.
         */
        io_schedule_timeout(5*HZ);

        finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc_noprof);
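
/*
 * Usage sketch (illustrative): in an IO submission path, a GFP_NOIO
 * allocation from the pool may sleep but will not fail, so the usual
 * pattern needs no error handling; my_io and io_pool are hypothetical.
 *
 *	struct my_io *io = mempool_alloc(io_pool, GFP_NOIO);
 *
 *	... fill in and submit io; its completion handler ends with ...
 *
 *	mempool_free(io, io_pool);
 */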

/**
 * mempool_alloc_preallocated - allocate an element from preallocated elements
 *                              belonging to a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function is similar to mempool_alloc(), but it only attempts to
 * allocate an element from the preallocated elements. It does not sleep
 * and returns immediately if no preallocated elements are available.
 *
 * Return: pointer to the allocated element or %NULL if no elements are
 * available.
 */
void *mempool_alloc_preallocated(mempool_t *pool)
{
        void *element;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(element);
                return element;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return NULL;
}
EXPORT_SYMBOL(mempool_alloc_preallocated);
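
/*
 * Usage sketch (illustrative): a caller that must not sleep can try the
 * reserve first and fall back to an opportunistic slab allocation.  Because
 * the pool is assumed to be backed by the same hypothetical my_cache,
 * either object can later be released with mempool_free().
 *
 *	elem = mempool_alloc_preallocated(pool);
 *	if (!elem)
 *		elem = kmem_cache_alloc(my_cache, GFP_NOWAIT | __GFP_NOWARN);
 *	if (!elem)
 *		return -EAGAIN;
 */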

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        if (unlikely(element == NULL))
                return;

        /*
         * Paired with the wmb in mempool_alloc().  The preceding read is
         * for @element and the following one is for @pool->curr_nr.  This
         * ensures that the visible value of @pool->curr_nr is from after
         * the allocation of @element.  This is necessary for fringe cases
         * where @element was passed to this task without going through
         * barriers.
         *
         * For example, assume @p is %NULL at the beginning and one task
         * performs "p = mempool_alloc(...);" while another task is doing
         * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
         * may end up using a curr_nr value which is from before the
         * allocation of @p without the following rmb.
         */
        smp_rmb();

        /*
         * For correctness, we need a test which is guaranteed to trigger
         * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
         * without locking achieves that and refilling as soon as possible
         * is desirable.
         *
         * Because curr_nr visible here is always a value after the
         * allocation of @element, any task which decremented curr_nr below
         * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
         * incremented to min_nr afterwards.  If curr_nr gets incremented
         * to min_nr after the allocation of @element, the elements
         * allocated after that are subject to the same guarantee.
         *
         * Waiters happen iff curr_nr is 0 and the above guarantee also
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
        if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }
        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
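
/*
 * Usage sketch (illustrative): mempool_free() is typically called from a
 * completion path, possibly in IRQ context; it only sleeps if the pool's
 * free_fn does.  The bio endio handler and my_io layout are hypothetical.
 *
 *	static void my_endio(struct bio *bio)
 *	{
 *		struct my_io *io = bio->bi_private;
 *
 *		mempool_free(io, io->pool);
 *		bio_put(bio);
 *	}
 */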

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *mem = pool_data;

        VM_BUG_ON(mem->ctor);
        return kmem_cache_alloc_noprof(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
        struct kmem_cache *mem = pool_data;

        kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
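
/*
 * Illustrative note: <linux/mempool.h> wires these two helpers up behind
 * mempool_create_slab_pool()/mempool_init_slab_pool(), so the common
 * slab-backed case reduces to (my_cache being hypothetical):
 *
 *	pool = mempool_create_slab_pool(MIN_IOS, my_cache);
 */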

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;

        return kmalloc_noprof(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
        kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
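
/*
 * Illustrative note: the buffer size travels through @pool_data as a plain
 * integer, so no kmem_cache is needed; <linux/mempool.h> also offers the
 * mempool_create_kmalloc_pool() shorthand.  For example, a pool of eight
 * 512-byte buffers:
 *
 *	pool = mempool_create_kmalloc_pool(8, 512);
 */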

void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;

        return kvmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kvmalloc);

void mempool_kvfree(void *element, void *pool_data)
{
        kvfree(element);
}
EXPORT_SYMBOL(mempool_kvfree);
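
/*
 * Usage sketch (illustrative): for reserves too large for kmalloc(), the
 * same size-through-pointer trick works with kvmalloc()-backed elements,
 * e.g. a pool of two 1 MiB buffers:
 *
 *	pool = mempool_create(2, mempool_kvmalloc, mempool_kvfree,
 *			      (void *)(unsigned long)SZ_1M);
 */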

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
        int order = (int)(long)pool_data;

        return alloc_pages_noprof(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
        int order = (int)(long)pool_data;

        __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
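
/*
 * Usage sketch (illustrative): <linux/mempool.h> wraps these helpers as
 * mempool_create_page_pool(min_nr, order).  Elements are struct page
 * pointers, not kernel virtual addresses; NR_RESERVED is hypothetical.
 *
 *	pool = mempool_create_page_pool(NR_RESERVED, 0);
 *	page = mempool_alloc(pool, GFP_NOIO);
 *	...
 *	mempool_free(page, pool);
 */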