Linux/include/net/page_pool/types.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
#include <net/netmem.h>

#define PP_FLAG_DMA_MAP         BIT(0) /* Should page_pool do the DMA
                                        * map/unmap
                                        */
#define PP_FLAG_DMA_SYNC_DEV    BIT(1) /* If set, all pages that the driver gets
                                        * from page_pool will be
                                        * DMA-synced-for-device according to
                                        * the length provided by the device
                                        * driver.
                                        * Please note DMA-sync-for-CPU is still
                                        * the device driver's responsibility.
                                        */
#define PP_FLAG_SYSTEM_POOL     BIT(2) /* Global system page_pool */
#define PP_FLAG_ALL             (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
                                 PP_FLAG_SYSTEM_POOL)

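/*
 * Editor's note (illustrative, not part of the upstream header): a driver
 * that wants the pool to own DMA mapping and device-side syncing typically
 * sets both flags in page_pool_params::flags, e.g.
 *
 *      .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *
 * PP_FLAG_DMA_SYNC_DEV is only meaningful together with PP_FLAG_DMA_MAP,
 * since the pool can only sync DMA mappings it created itself.
 */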
/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case.
 * XDP_DROP allows objects to be recycled directly into this array,
 * as it shares the same softirq/NAPI protection.  If the cache is
 * already full (or partly full) then XDP_DROP recycles would have to
 * take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE     128
#define PP_ALLOC_CACHE_REFILL   64
struct pp_alloc_cache {
        u32 count;
        netmem_ref cache[PP_ALLOC_CACHE_SIZE];
};

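/*
 * Editor's illustrative sketch (not the actual implementation in
 * net/core/page_pool.c): conceptually, the allocation fast path pops from
 * the cache array and, when it runs dry, refills up to
 * PP_ALLOC_CACHE_REFILL entries from the ptr_ring before falling back to
 * the page allocator.  "example_cache_refill_from_ring" is a hypothetical
 * helper name used only for this sketch.
 */
#if 0 /* example only */
static netmem_ref example_cache_get(struct page_pool *pool)
{
        struct pp_alloc_cache *cache = &pool->alloc;

        if (unlikely(!cache->count))
                example_cache_refill_from_ring(pool); /* hypothetical helper */

        if (cache->count)
                return cache->cache[--cache->count];

        return 0; /* caller falls back to the slow path */
}
#endif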
/**
 * struct page_pool_params - page pool parameters
 * @fast:       params accessed frequently on hotpath
 * @order:      2^order pages on allocation
 * @pool_size:  size of the ptr_ring
 * @nid:        NUMA node id to allocate pages from
 * @dev:        device, for DMA pre-mapping purposes
 * @napi:       NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir:    DMA mapping direction
 * @max_len:    max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset:     DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 * @slow:       params with slowpath access only (initialization and Netlink)
 * @netdev:     netdev this pool will serve (leave as NULL if none or multiple)
 * @flags:      PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
 */
struct page_pool_params {
        struct_group_tagged(page_pool_params_fast, fast,
                unsigned int    order;
                unsigned int    pool_size;
                int             nid;
                struct device   *dev;
                struct napi_struct *napi;
                enum dma_data_direction dma_dir;
                unsigned int    max_len;
                unsigned int    offset;
        );
        struct_group_tagged(page_pool_params_slow, slow,
                struct net_device *netdev;
                unsigned int    flags;
/* private: used by test code only */
                void (*init_callback)(netmem_ref netmem, void *arg);
                void *init_arg;
        );
};

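/*
 * Editor's illustrative sketch (not part of the upstream header): how an Rx
 * queue setup path might fill in struct page_pool_params and create a pool.
 * All names (my_rxq, MY_RX_RING_SIZE, MY_RX_HEADROOM) are hypothetical;
 * page_pool_create() is declared further down in this header.
 */
#if 0 /* example only */
static int my_rxq_create_pool(struct my_rxq *rxq)
{
        struct page_pool_params pp_params = {
                .order          = 0,                    /* single pages */
                .pool_size      = MY_RX_RING_SIZE,
                .nid            = NUMA_NO_NODE,
                .dev            = rxq->dev,             /* struct device for DMA */
                .napi           = &rxq->napi,
                .dma_dir        = DMA_FROM_DEVICE,
                .max_len        = PAGE_SIZE,            /* sync whole page for device */
                .offset         = MY_RX_HEADROOM,
                .netdev         = rxq->netdev,
                .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
        };
        struct page_pool *pool;

        pool = page_pool_create(&pp_params);
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        rxq->page_pool = pool;
        return 0;
}
#endif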
#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast:       successful fast path allocations
 * @slow:       slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty:      ptr ring is empty, so a slow path allocation was forced
 * @refill:     an allocation which triggered a refill of the cache
 * @waive:      pages obtained from the ptr ring that cannot be added to
 *              the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {
        u64 fast;
        u64 slow;
        u64 slow_high_order;
        u64 empty;
        u64 refill;
        u64 waive;
};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached:     recycling placed page in the page pool cache
 * @cache_full: page pool cache was full
 * @ring:       page placed into the ptr ring
 * @ring_full:  page released from page pool because the ptr ring was full
 * @released_refcnt:    page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {
        u64 cached;
        u64 cache_full;
        u64 ring;
        u64 ring_full;
        u64 released_refcnt;
};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats:        see struct page_pool_alloc_stats
 * @recycle_stats:      see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {
        struct page_pool_alloc_stats alloc_stats;
        struct page_pool_recycle_stats recycle_stats;
};
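
/*
 * Editor's illustrative sketch (not part of the upstream header): a driver
 * can aggregate these counters across its pools with page_pool_get_stats()
 * (declared in <net/page_pool/helpers.h> when CONFIG_PAGE_POOL_STATS is
 * enabled) and report them, e.g. via ethtool.  "my_priv" and "num_rxqs" are
 * hypothetical driver-private names.
 */
#if 0 /* example only */
static void my_collect_pp_stats(struct my_priv *priv,
                                struct page_pool_stats *stats)
{
        int i;

        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < priv->num_rxqs; i++)
                /* accumulates into *stats across successive calls */
                page_pool_get_stats(priv->rxq[i].page_pool, stats);
}
#endif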
#endif

/* The whole frag API block must stay within one cacheline. On 32-bit systems,
 * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``.
 * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``.
 * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that
 * one for simplicity.
 * Having it aligned to a cacheline boundary may be excessive and doesn't bring
 * any benefit.
 */
#define PAGE_POOL_FRAG_GROUP_ALIGN      (4 * sizeof(long))
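/*
 * Editor's note, spelling out the arithmetic above: the frag group holds
 * frag_users (long), frag_page (netmem_ref, long-sized) and frag_offset
 * (unsigned int).  On 64-bit that is 8 + 8 + 4 = 20 bytes, on 32-bit
 * 4 + 4 + 4 = 12 bytes; aligning to 4 * sizeof(long) (32 resp. 16 bytes)
 * keeps the whole group inside one cacheline on common configurations.
 */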

struct page_pool {
        struct page_pool_params_fast p;

        int cpuid;
        u32 pages_state_hold_cnt;

        bool has_init_callback:1;       /* slow::init_callback is set */
        bool dma_map:1;                 /* Perform DMA mapping */
        bool dma_sync:1;                /* Perform DMA sync */
#ifdef CONFIG_PAGE_POOL_STATS
        bool system:1;                  /* This is a global percpu pool */
#endif

        __cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
        long frag_users;
        netmem_ref frag_page;
        unsigned int frag_offset;
        __cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);

        struct delayed_work release_dw;
        void (*disconnect)(void *pool);
        unsigned long defer_start;
        unsigned long defer_warn;

#ifdef CONFIG_PAGE_POOL_STATS
        /* these stats are incremented while in softirq context */
        struct page_pool_alloc_stats alloc_stats;
#endif
        u32 xdp_mem_id;

        /*
         * Data structure for allocation side
         *
         * Drivers' allocation side usually already performs some kind
         * of resource protection.  Piggyback on this protection, and
         * require the driver to protect the allocation side.
         *
         * For NIC drivers this means allocating a page_pool per
         * RX-queue, as the RX-queue is already protected by
         * softirq/BH scheduling and napi_schedule.  NAPI scheduling
         * guarantees that a single napi_struct will only be scheduled
         * on a single CPU (see napi_schedule).
         */
        struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

        /* Data structure for storing recycled pages.
         *
         * Returning/freeing pages is more complicated synchronization-wise,
         * because frees can happen on remote CPUs, with no association
         * with the allocation resource.
         *
         * Use ptr_ring, as it separates consumer and producer
         * efficiently, in a way that doesn't bounce cache-lines.
         *
         * TODO: Implement bulk return pages into this structure.
         */
        struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
        /* recycle stats are per-cpu to avoid locking */
        struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
        atomic_t pages_state_release_cnt;

        /* A page_pool is strictly tied to a single RX-queue being
         * protected by NAPI, due to the above pp_alloc_cache. This
         * refcnt serves to simplify drivers' error handling.
         */
        refcount_t user_cnt;

        u64 destroy_cnt;

        /* Slow/Control-path information follows */
        struct page_pool_params_slow slow;
        /* User-facing fields, protected by page_pools_lock */
        struct {
                struct hlist_node list;
                u64 detach_time;
                u32 napi_id;
                u32 id;
        } user;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
                                  unsigned int size, gfp_t gfp);
netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
                                       unsigned int *offset, unsigned int size,
                                       gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
                                          int cpuid);

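/*
 * Editor's illustrative sketch (not part of the upstream header): typical use
 * of the allocation API from an Rx refill path.  page_pool_alloc_frag()
 * returns a piece of a (possibly compound) page and writes its offset into
 * *offset; page_pool_alloc_pages() hands out whole pages.  "my_rxq" and
 * "MY_RX_BUF_SIZE" are hypothetical driver names.
 */
#if 0 /* example only */
static struct page *my_rx_alloc_buf(struct my_rxq *rxq, unsigned int *offset)
{
        /* GFP_ATOMIC: refill commonly runs in NAPI (softirq) context */
        return page_pool_alloc_frag(rxq->page_pool, offset,
                                    MY_RX_BUF_SIZE, GFP_ATOMIC);
}
#endif
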
struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_disable_direct_recycling(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
                           const struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                             int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
                                         void (*disconnect)(void *),
                                         const struct xdp_mem_info *mem)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                                           int count)
{
}
#endif

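/*
 * Editor's illustrative sketch (not part of the upstream header): pool
 * teardown from a driver's Rx queue destroy path.  page_pool_destroy()
 * releases the creator's reference and, once all outstanding pages have
 * been returned, frees the pool (possibly deferred via the release work).
 * "my_rxq" is a hypothetical driver structure.
 */
#if 0 /* example only */
static void my_rxq_destroy_pool(struct my_rxq *rxq)
{
        page_pool_destroy(rxq->page_pool);
        rxq->page_pool = NULL;
}
#endif
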
void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
                                  unsigned int dma_sync_size,
                                  bool allow_direct);
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
                                unsigned int dma_sync_size,
                                bool allow_direct);

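/*
 * Editor's illustrative sketch (not part of the upstream header): returning
 * a buffer to the pool from the Rx completion path.  The "unrefed" variants
 * assume the caller already owns the page (no fragment reference is dropped
 * here).  allow_direct=true is only safe from the pool's own NAPI/softirq
 * context, where the page can go straight back into the lockless alloc
 * cache; dma_sync_size bounds how much of the buffer is DMA-synced for the
 * device on recycle.  "MY_RX_BUF_SIZE" is a hypothetical driver constant.
 */
#if 0 /* example only */
static void my_rx_recycle(struct page_pool *pool, struct page *page)
{
        page_pool_put_unrefed_page(pool, page, MY_RX_BUF_SIZE, true);
}
#endif
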
static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
        return true;
#else
        return false;
#endif
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);

#endif /* _NET_PAGE_POOL_TYPES_H */
