Linux/include/linux/maple_tree.h

  1 /* SPDX-License-Identifier: GPL-2.0+ */
  2 #ifndef _LINUX_MAPLE_TREE_H
  3 #define _LINUX_MAPLE_TREE_H
  4 /*
  5  * Maple Tree - An RCU-safe adaptive tree for storing ranges
  6  * Copyright (c) 2018-2022 Oracle
  7  * Authors:     Liam R. Howlett <Liam.Howlett@Oracle.com>
  8  *              Matthew Wilcox <willy@infradead.org>
  9  */
 10 
 11 #include <linux/kernel.h>
 12 #include <linux/rcupdate.h>
 13 #include <linux/spinlock.h>
 14 /* #define CONFIG_MAPLE_RCU_DISABLED */
 15 
 16 /*
 17  * Allocated nodes are mutable until they have been inserted into the tree,
 18  * at which time they cannot change their type until they have been removed
 19  * from the tree and an RCU grace period has passed.
 20  *
 21  * Removed nodes have their ->parent set to point to themselves.  RCU readers
 22  * check ->parent before relying on the value that they loaded from the
 23  * slots array.  This lets us reuse the slots array for the RCU head.
 24  *
 25  * Nodes in the tree point to their parent unless bit 0 is set.
 26  */
 27 #if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
 28 /* 64bit sizes */
 29 #define MAPLE_NODE_SLOTS        31      /* 256 bytes including ->parent */
 30 #define MAPLE_RANGE64_SLOTS     16      /* 256 bytes */
 31 #define MAPLE_ARANGE64_SLOTS    10      /* 240 bytes */
 32 #define MAPLE_ALLOC_SLOTS       (MAPLE_NODE_SLOTS - 1)
 33 #else
 34 /* 32bit sizes */
 35 #define MAPLE_NODE_SLOTS        63      /* 256 bytes including ->parent */
 36 #define MAPLE_RANGE64_SLOTS     32      /* 256 bytes */
 37 #define MAPLE_ARANGE64_SLOTS    21      /* 240 bytes */
 38 #define MAPLE_ALLOC_SLOTS       (MAPLE_NODE_SLOTS - 2)
 39 #endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
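    /*
     * Editorial sanity check of the sizes above (not part of the original
     * header): on 64 bit, a dense node is one 8-byte ->parent plus
     * MAPLE_NODE_SLOTS (31) 8-byte slots, i.e. (1 + 31) * 8 = 256 bytes.  A
     * maple_range_64 node is ->parent + 15 pivots + 16 slots, again
     * 32 * 8 = 256 bytes, and a maple_arange_64 node is ->parent + 9 pivots +
     * 10 slots + 10 gaps, i.e. 30 * 8 = 240 bytes before the metadata.
     */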
 40 
 41 #define MAPLE_NODE_MASK         255UL
 42 
 43 /*
 44  * The node->parent of the root node has bit 0 set and the rest of the pointer
 45  * is a pointer to the tree itself.  No more bits are available in this pointer
 46  * (on m68k, the data structure may only be 2-byte aligned).
 47  *
 48  * Internal non-root nodes can only have maple_range_* nodes as parents.  The
 49  * parent pointer is 256B aligned like all other tree nodes.  When storing 32
 50  * or 64 bit values, the offset can fit into 4 bits.  The 16 bit values need an
 51  * extra bit to store the offset.  This extra bit comes from a reuse of the last
 52  * bit in the node type.  This is possible by using bit 1 to indicate if bit 2
 53  * is part of the type or the slot.
 54  *
 55  * Once the type is decided, the choice between an allocation range node and a
 56  * plain range node is made by checking the tree's immutable
 57  * MT_FLAGS_ALLOC_RANGE flag.
 58  *
 59  *  Node types:
 60  *   0x??1 = Root
 61  *   0x?00 = 16 bit nodes
 62  *   0x010 = 32 bit nodes
 63  *   0x110 = 64 bit nodes
 64  *
 65  *  Slot size and location in the parent pointer:
 66  *   type  : slot location
 67  *   0x??1 : Root
 68  *   0x?00 : 16 bit values, type in 0-1, slot in 2-6
 69  *   0x010 : 32 bit values, type in 0-2, slot in 3-6
 70  *   0x110 : 64 bit values, type in 0-2, slot in 3-6
 71  */
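    /*
     * Worked example of the encoding above (editorial illustration, not part
     * of the original header): a parent pointer whose low byte is 0b00101110
     * decodes as bits 0-2 = 0b110 (a 64 bit node) and bits 3-6 = 0b0101, so
     * the child occupies slot 5 of its parent.  The remaining bits are the
     * 256-byte-aligned address of the parent node itself.
     */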
 72 
 73 /*
 74  * This metadata is used to optimize the gap updating code and in reverse
 75  * searching for gaps or any other code that needs to find the end of the data.
 76  */
 77 struct maple_metadata {
 78         unsigned char end;
 79         unsigned char gap;
 80 };
 81 
 82 /*
 83  * Leaf nodes do not store pointers to nodes, they store user data.  Users may
 84  * store almost any bit pattern.  As noted above, the optimisation of storing an
 85  * entry at 0 in the root pointer cannot be done for data which have the bottom
 86  * two bits set to '10'.  We also reserve values with the bottom two bits set to
 87  * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use.  Some APIs
 88  * return errnos as a negative errno shifted left by two bits and the bottom
 89  * two bits set to '10', and while choosing to store these values in the tree
 90  * is not an error, it may lead to confusion if you're testing for an error with
 91  * mas_is_err().
 92  *
 93  * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
 94  * 3-6), bit 2 is reserved.  That leaves bits 0-1 unused for now.
 95  *
 96  * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 97  * indicate that the tree is specifying ranges.  Pivots may appear in the
 98  * subtree with an entry attached to the value whereas keys are unique to a
 99  * specific position of a B-tree.  Pivot values are inclusive of the slot with
100  * the same index.
101  */
102 
103 struct maple_range_64 {
104         struct maple_pnode *parent;
105         unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
106         union {
107                 void __rcu *slot[MAPLE_RANGE64_SLOTS];
108                 struct {
109                         void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
110                         struct maple_metadata meta;
111                 };
112         };
113 };
114 
115 /*
116  * At tree creation time, the user can specify that they're willing to trade off
117  * storing fewer entries in a tree in return for storing more information in
118  * each node.
119  *
120  * The maple tree supports recording the largest range of NULL entries available
121  * in this node, also called gaps.  This optimises the tree for allocating a
122  * range.
123  */
124 struct maple_arange_64 {
125         struct maple_pnode *parent;
126         unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
127         void __rcu *slot[MAPLE_ARANGE64_SLOTS];
128         unsigned long gap[MAPLE_ARANGE64_SLOTS];
129         struct maple_metadata meta;
130 };
131 
132 struct maple_alloc {
133         unsigned long total;
134         unsigned char node_count;
135         unsigned int request_count;
136         struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
137 };
138 
139 struct maple_topiary {
140         struct maple_pnode *parent;
141         struct maple_enode *next; /* Overlaps the pivot */
142 };
143 
144 enum maple_type {
145         maple_dense,
146         maple_leaf_64,
147         maple_range_64,
148         maple_arange_64,
149 };
150 
151 
152 /**
153  * DOC: Maple tree flags
154  *
155  * * MT_FLAGS_ALLOC_RANGE       - Track gaps in this tree
156  * * MT_FLAGS_USE_RCU           - Operate in RCU mode
157  * * MT_FLAGS_HEIGHT_OFFSET     - The position of the tree height in the flags
158  * * MT_FLAGS_HEIGHT_MASK       - The mask for the maple tree height value
159  * * MT_FLAGS_LOCK_MASK         - How the mt_lock is used
160  * * MT_FLAGS_LOCK_IRQ          - Acquired irq-safe
161  * * MT_FLAGS_LOCK_BH           - Acquired bh-safe
162  * * MT_FLAGS_LOCK_EXTERN       - mt_lock is not used
163  *
164  * MAPLE_HEIGHT_MAX     The largest height that can be stored
165  */
166 #define MT_FLAGS_ALLOC_RANGE    0x01
167 #define MT_FLAGS_USE_RCU        0x02
168 #define MT_FLAGS_HEIGHT_OFFSET  0x02
169 #define MT_FLAGS_HEIGHT_MASK    0x7C
170 #define MT_FLAGS_LOCK_MASK      0x300
171 #define MT_FLAGS_LOCK_IRQ       0x100
172 #define MT_FLAGS_LOCK_BH        0x200
173 #define MT_FLAGS_LOCK_EXTERN    0x300
174 #define MT_FLAGS_ALLOC_WRAPPED  0x0800
175 
176 #define MAPLE_HEIGHT_MAX        31
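    /*
     * Editorial note derived from the masks above (not part of the original
     * header): MT_FLAGS_HEIGHT_MASK (0x7C) together with
     * MT_FLAGS_HEIGHT_OFFSET (2) keeps the tree height in bits 2-6, which is
     * why MAPLE_HEIGHT_MAX is 31.  MT_FLAGS_LOCK_MASK (0x300) keeps the lock
     * mode in bits 8-9, so MT_FLAGS_LOCK_IRQ, MT_FLAGS_LOCK_BH and
     * MT_FLAGS_LOCK_EXTERN are mutually exclusive values, not independent bits.
     */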
177 
178 
179 #define MAPLE_NODE_TYPE_MASK    0x0F
180 #define MAPLE_NODE_TYPE_SHIFT   0x03
181 
182 #define MAPLE_RESERVED_RANGE    4096
183 
184 #ifdef CONFIG_LOCKDEP
185 typedef struct lockdep_map *lockdep_map_p;
186 #define mt_lock_is_held(mt)                                             \
187         (!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
188 
189 #define mt_write_lock_is_held(mt)                                       \
190         (!(mt)->ma_external_lock ||                                     \
191          lock_is_held_type((mt)->ma_external_lock, 0))
192 
193 #define mt_set_external_lock(mt, lock)                                  \
194         (mt)->ma_external_lock = &(lock)->dep_map
195 
196 #define mt_on_stack(mt)                 (mt).ma_external_lock = NULL
197 #else
198 typedef struct { /* nothing */ } lockdep_map_p;
199 #define mt_lock_is_held(mt)             1
200 #define mt_write_lock_is_held(mt)       1
201 #define mt_set_external_lock(mt, lock)  do { } while (0)
202 #define mt_on_stack(mt)                 do { } while (0)
203 #endif
204 
205 /*
206  * If the tree contains a single entry at index 0, it is usually stored in
207  * tree->ma_root.  To optimise for the page cache, an entry which ends in '00',
208  * '01' or '11' is stored in the root, but an entry which ends in '10' will be
209  * stored in a node.  Bits 3-6 are used to store enum maple_type.
210  *
211  * The flags are used both to store some immutable information about this tree
212  * (set at tree creation time) and dynamic information set under the spinlock.
213  *
 214  * Another use of the flags is to indicate global states of the tree.  This is
 215  * the case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in
216  * RCU mode.  This mode was added to allow the tree to reuse nodes instead of
217  * re-allocating and RCU freeing nodes when there is a single user.
218  */
219 struct maple_tree {
220         union {
221                 spinlock_t      ma_lock;
222                 lockdep_map_p   ma_external_lock;
223         };
224         unsigned int    ma_flags;
225         void __rcu      *ma_root;
226 };
227 
228 /**
229  * MTREE_INIT() - Initialize a maple tree
230  * @name: The maple tree name
231  * @__flags: The maple tree flags
232  *
233  */
234 #define MTREE_INIT(name, __flags) {                                     \
235         .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock),                \
236         .ma_flags = __flags,                                            \
237         .ma_root = NULL,                                                \
238 }
239 
240 /**
241  * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
242  * @name: The tree name
243  * @__flags: The maple tree flags
244  * @__lock: The external lock
245  */
246 #ifdef CONFIG_LOCKDEP
247 #define MTREE_INIT_EXT(name, __flags, __lock) {                         \
248         .ma_external_lock = &(__lock).dep_map,                          \
249         .ma_flags = (__flags),                                          \
250         .ma_root = NULL,                                                \
251 }
252 #else
253 #define MTREE_INIT_EXT(name, __flags, __lock)   MTREE_INIT(name, __flags)
254 #endif
255 
256 #define DEFINE_MTREE(name)                                              \
257         struct maple_tree name = MTREE_INIT(name, 0)
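    /*
     * Example (editorial sketch, not part of the original header; the tree
     * names are illustrative):
     *
     *      static DEFINE_MTREE(static_tree);       (statically initialised, no flags)
     *
     *      struct maple_tree dyn_tree;
     *      mt_init(&dyn_tree);                     (run-time equivalent, see mt_init() below)
     */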
258 
259 #define mtree_lock(mt)          spin_lock((&(mt)->ma_lock))
 260 #define mtree_lock_nested(mt, subclass) \
261                 spin_lock_nested((&(mt)->ma_lock), subclass)
262 #define mtree_unlock(mt)        spin_unlock((&(mt)->ma_lock))
263 
264 /*
265  * The Maple Tree squeezes various bits in at various points which aren't
266  * necessarily obvious.  Usually, this is done by observing that pointers are
267  * N-byte aligned and thus the bottom log_2(N) bits are available for use.  We
268  * don't use the high bits of pointers to store additional information because
269  * we don't know what bits are unused on any given architecture.
270  *
271  * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
272  * low bits for our own purposes.  Nodes are currently of 4 types:
273  * 1. Single pointer (Range is 0-0)
274  * 2. Non-leaf Allocation Range nodes
275  * 3. Non-leaf Range nodes
 276  * 4. Leaf Range nodes
 277  * All nodes consist of a number of node slots, pivots, and a parent pointer.
278  */
279 
280 struct maple_node {
281         union {
282                 struct {
283                         struct maple_pnode *parent;
284                         void __rcu *slot[MAPLE_NODE_SLOTS];
285                 };
286                 struct {
287                         void *pad;
288                         struct rcu_head rcu;
289                         struct maple_enode *piv_parent;
290                         unsigned char parent_slot;
291                         enum maple_type type;
292                         unsigned char slot_len;
293                         unsigned int ma_flags;
294                 };
295                 struct maple_range_64 mr64;
296                 struct maple_arange_64 ma64;
297                 struct maple_alloc alloc;
298         };
299 };
300 
301 /*
302  * More complicated stores can cause two nodes to become one or three and
303  * potentially alter the height of the tree.  Either half of the tree may need
304  * to be rebalanced against the other.  The ma_topiary struct is used to track
305  * which nodes have been 'cut' from the tree so that the change can be done
306  * safely at a later date.  This is done to support RCU.
307  */
308 struct ma_topiary {
309         struct maple_enode *head;
310         struct maple_enode *tail;
311         struct maple_tree *mtree;
312 };
313 
314 void *mtree_load(struct maple_tree *mt, unsigned long index);
315 
316 int mtree_insert(struct maple_tree *mt, unsigned long index,
317                 void *entry, gfp_t gfp);
318 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
319                 unsigned long last, void *entry, gfp_t gfp);
320 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
321                 void *entry, unsigned long size, unsigned long min,
322                 unsigned long max, gfp_t gfp);
323 int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
324                 void *entry, unsigned long range_lo, unsigned long range_hi,
325                 unsigned long *next, gfp_t gfp);
326 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
327                 void *entry, unsigned long size, unsigned long min,
328                 unsigned long max, gfp_t gfp);
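    /*
     * Example (editorial sketch, not part of the original header; "mt", "ptr"
     * and the bounds are illustrative).  mtree_alloc_range() expects a tree
     * that tracks gaps (MT_FLAGS_ALLOC_RANGE) and, on success, returns 0 and
     * writes the start of the range it picked through @startp:
     *
     *      unsigned long start;
     *      int ret;
     *
     *      ret = mtree_alloc_range(mt, &start, ptr, 16, 0, 1000, GFP_KERNEL);
     *      if (!ret)
     *              pr_debug("ptr stored at [%lu, %lu]\n", start, start + 15);
     */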
329 
330 int mtree_store_range(struct maple_tree *mt, unsigned long first,
331                       unsigned long last, void *entry, gfp_t gfp);
332 int mtree_store(struct maple_tree *mt, unsigned long index,
333                 void *entry, gfp_t gfp);
334 void *mtree_erase(struct maple_tree *mt, unsigned long index);
335 
336 int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
337 int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
338 
339 void mtree_destroy(struct maple_tree *mt);
340 void __mt_destroy(struct maple_tree *mt);
341 
342 /**
343  * mtree_empty() - Determine if a tree has any present entries.
344  * @mt: Maple Tree.
345  *
346  * Context: Any context.
347  * Return: %true if the tree contains only NULL pointers.
348  */
349 static inline bool mtree_empty(const struct maple_tree *mt)
350 {
351         return mt->ma_root == NULL;
352 }
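    /*
     * Example of the basic mtree_* API, which handles its own locking
     * (editorial sketch, not part of the original header; the index and
     * "some_ptr" are illustrative):
     *
     *      DEFINE_MTREE(tree);
     *      void *item;
     *
     *      mtree_store(&tree, 12, some_ptr, GFP_KERNEL);
     *      item = mtree_load(&tree, 12);           (item == some_ptr)
     *      mtree_erase(&tree, 12);
     *      mtree_destroy(&tree);
     */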
353 
354 /* Advanced API */
355 
356 /*
357  * Maple State Status
358  * ma_active means the maple state is pointing to a node and offset and can
359  * continue operating on the tree.
360  * ma_start means we have not searched the tree.
361  * ma_root means we have searched the tree and the entry we found lives in
362  * the root of the tree (ie it has index 0, length 1 and is the only entry in
363  * the tree).
364  * ma_none means we have searched the tree and there is no node in the
365  * tree for this entry.  For example, we searched for index 1 in an empty
366  * tree.  Or we have a tree which points to a full leaf node and we
367  * searched for an entry which is larger than can be contained in that
368  * leaf node.
 369  * ma_pause means the data within the maple state may be stale; restart the
 370  * operation.
 371  * ma_overflow means the search has reached the upper limit of the search.
 372  * ma_underflow means the search has reached the lower limit of the search.
373  * ma_error means there was an error, check the node for the error number.
374  */
375 enum maple_status {
376         ma_active,
377         ma_start,
378         ma_root,
379         ma_none,
380         ma_pause,
381         ma_overflow,
382         ma_underflow,
383         ma_error,
384 };
385 
386 /*
387  * The maple state is defined in the struct ma_state and is used to keep track
388  * of information during operations, and even between operations when using the
389  * advanced API.
390  *
391  * If state->node has bit 0 set then it references a tree location which is not
392  * a node (eg the root).  If bit 1 is set, the rest of the bits are a negative
393  * errno.  Bit 2 (the 'unallocated slots' bit) is clear.  Bits 3-6 indicate the
394  * node type.
395  *
396  * state->alloc either has a request number of nodes or an allocated node.  If
 397  * state->alloc has a requested number of nodes, the first bit will be set (0x1)
398  * and the remaining bits are the value.  If state->alloc is a node, then the
399  * node will be of type maple_alloc.  maple_alloc has MAPLE_NODE_SLOTS - 1 for
400  * storing more allocated nodes, a total number of nodes allocated, and the
401  * node_count in this node.  node_count is the number of allocated nodes in this
402  * node.  The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
403  * nodes into state->alloc->slot[0]'s node.  Nodes are taken from state->alloc
404  * by removing a node from the state->alloc node until state->alloc->node_count
405  * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
406  * to state->alloc.  Nodes are pushed onto state->alloc by putting the current
407  * state->alloc into the pushed node's slot[0].
408  *
409  * The state also contains the implied min/max of the state->node, the depth of
410  * this search, and the offset. The implied min/max are either from the parent
411  * node or are 0-oo for the root node.  The depth is incremented or decremented
412  * every time a node is walked down or up.  The offset is the slot/pivot of
413  * interest in the node - either for reading or writing.
414  *
415  * When returning a value the maple state index and last respectively contain
416  * the start and end of the range for the entry.  Ranges are inclusive in the
417  * Maple Tree.
418  *
419  * The status of the state is used to determine how the next action should treat
420  * the state.  For instance, if the status is ma_start then the next action
421  * should start at the root of the tree and walk down.  If the status is
422  * ma_pause then the node may be stale data and should be discarded.  If the
423  * status is ma_overflow, then the last action hit the upper limit.
424  *
425  */
426 struct ma_state {
427         struct maple_tree *tree;        /* The tree we're operating in */
428         unsigned long index;            /* The index we're operating on - range start */
429         unsigned long last;             /* The last index we're operating on - range end */
430         struct maple_enode *node;       /* The node containing this entry */
431         unsigned long min;              /* The minimum index of this node - implied pivot min */
432         unsigned long max;              /* The maximum index of this node - implied pivot max */
433         struct maple_alloc *alloc;      /* Allocated nodes for this operation */
434         enum maple_status status;       /* The status of the state (active, start, none, etc) */
435         unsigned char depth;            /* depth of tree descent during write */
436         unsigned char offset;
437         unsigned char mas_flags;
438         unsigned char end;              /* The end of the node */
439 };
440 
441 struct ma_wr_state {
442         struct ma_state *mas;
443         struct maple_node *node;        /* Decoded mas->node */
444         unsigned long r_min;            /* range min */
445         unsigned long r_max;            /* range max */
446         enum maple_type type;           /* mas->node type */
447         unsigned char offset_end;       /* The offset where the write ends */
448         unsigned long *pivots;          /* mas->node->pivots pointer */
449         unsigned long end_piv;          /* The pivot at the offset end */
450         void __rcu **slots;             /* mas->node->slots pointer */
451         void *entry;                    /* The entry to write */
452         void *content;                  /* The existing entry that is being overwritten */
453 };
454 
455 #define mas_lock(mas)           spin_lock(&((mas)->tree->ma_lock))
456 #define mas_lock_nested(mas, subclass) \
457                 spin_lock_nested(&((mas)->tree->ma_lock), subclass)
458 #define mas_unlock(mas)         spin_unlock(&((mas)->tree->ma_lock))
459 
460 /*
461  * Special values for ma_state.node.
462  * MA_ERROR represents an errno.  After dropping the lock and attempting
463  * to resolve the error, the walk would have to be restarted from the
464  * top of the tree as the tree may have been modified.
465  */
466 #define MA_ERROR(err) \
467                 ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
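    /*
     * Worked example (editorial, not part of the original header): for
     * -ENOMEM (-12), MA_ERROR() produces ((unsigned long)-12 << 2) | 2, a
     * value whose bottom two bits are '10' as described for reserved entries
     * above; shifting it back right by two bits (as a signed long) recovers
     * the errno.
     */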
468 
469 #define MA_STATE(name, mt, first, end)                                  \
470         struct ma_state name = {                                        \
471                 .tree = mt,                                             \
472                 .index = first,                                         \
473                 .last = end,                                            \
474                 .node = NULL,                                           \
475                 .status = ma_start,                                     \
476                 .min = 0,                                               \
477                 .max = ULONG_MAX,                                       \
478                 .alloc = NULL,                                          \
479                 .mas_flags = 0,                                         \
480         }
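    /*
     * Example of the advanced API (editorial sketch, not part of the original
     * header; "mt", the range and "new_entry" are illustrative).  The caller
     * provides the locking:
     *
     *      MA_STATE(mas, mt, 10, 20);
     *      int err;
     *
     *      mas_lock(&mas);
     *      err = mas_store_gfp(&mas, new_entry, GFP_KERNEL);
     *      mas_unlock(&mas);
     */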
481 
482 #define MA_WR_STATE(name, ma_state, wr_entry)                           \
483         struct ma_wr_state name = {                                     \
484                 .mas = ma_state,                                        \
485                 .content = NULL,                                        \
486                 .entry = wr_entry,                                      \
487         }
488 
489 #define MA_TOPIARY(name, tree)                                          \
490         struct ma_topiary name = {                                      \
491                 .head = NULL,                                           \
492                 .tail = NULL,                                           \
493                 .mtree = tree,                                          \
494         }
495 
496 void *mas_walk(struct ma_state *mas);
497 void *mas_store(struct ma_state *mas, void *entry);
498 void *mas_erase(struct ma_state *mas);
499 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
500 void mas_store_prealloc(struct ma_state *mas, void *entry);
501 void *mas_find(struct ma_state *mas, unsigned long max);
502 void *mas_find_range(struct ma_state *mas, unsigned long max);
503 void *mas_find_rev(struct ma_state *mas, unsigned long min);
504 void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
505 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
506 int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
507                 void *entry, unsigned long range_lo, unsigned long range_hi,
508                 unsigned long *next, gfp_t gfp);
509 
510 bool mas_nomem(struct ma_state *mas, gfp_t gfp);
511 void mas_pause(struct ma_state *mas);
512 void maple_tree_init(void);
513 void mas_destroy(struct ma_state *mas);
514 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
515 
516 void *mas_prev(struct ma_state *mas, unsigned long min);
517 void *mas_prev_range(struct ma_state *mas, unsigned long max);
518 void *mas_next(struct ma_state *mas, unsigned long max);
519 void *mas_next_range(struct ma_state *mas, unsigned long max);
520 
521 int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
522                    unsigned long size);
523 /*
524  * This finds an empty area from the highest address to the lowest.
 525  * AKA "Topdown" version.
526  */
527 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
528                        unsigned long max, unsigned long size);
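    /*
     * Example of a gap search (editorial sketch, not part of the original
     * header; "mt" and the bounds are illustrative).  The tree must track
     * gaps (MT_FLAGS_ALLOC_RANGE); on success (return 0) the maple state is
     * positioned on a free range of at least the requested size:
     *
     *      MA_STATE(mas, mt, 0, 0);
     *
     *      mas_lock(&mas);
     *      if (!mas_empty_area(&mas, 0, ULONG_MAX, 16))
     *              pr_debug("gap starts at %lu\n", mas.index);
     *      mas_unlock(&mas);
     */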
529 
530 static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
531                             unsigned long addr)
532 {
533         memset(mas, 0, sizeof(struct ma_state));
534         mas->tree = tree;
535         mas->index = mas->last = addr;
536         mas->max = ULONG_MAX;
537         mas->status = ma_start;
538         mas->node = NULL;
539 }
540 
541 static inline bool mas_is_active(struct ma_state *mas)
542 {
543         return mas->status == ma_active;
544 }
545 
546 static inline bool mas_is_err(struct ma_state *mas)
547 {
548         return mas->status == ma_error;
549 }
550 
551 /**
552  * mas_reset() - Reset a Maple Tree operation state.
553  * @mas: Maple Tree operation state.
554  *
555  * Resets the error or walk state of the @mas so future walks of the
556  * array will start from the root.  Use this if you have dropped the
557  * lock and want to reuse the ma_state.
558  *
559  * Context: Any context.
560  */
561 static __always_inline void mas_reset(struct ma_state *mas)
562 {
563         mas->status = ma_start;
564         mas->node = NULL;
565 }
566 
567 /**
568  * mas_for_each() - Iterate over a range of the maple tree.
569  * @__mas: Maple Tree operation state (maple_state)
570  * @__entry: Entry retrieved from the tree
571  * @__max: maximum index to retrieve from the tree
572  *
573  * When returned, mas->index and mas->last will hold the entire range for the
574  * entry.
575  *
576  * Note: may return the zero entry.
577  */
578 #define mas_for_each(__mas, __entry, __max) \
579         while (((__entry) = mas_find((__mas), (__max))) != NULL)
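    /*
     * Example (editorial sketch, not part of the original header; "mt" is
     * illustrative).  When the tree is in RCU mode a reader may iterate under
     * rcu_read_lock() rather than the spinlock:
     *
     *      MA_STATE(mas, mt, 0, 0);
     *      void *entry;
     *
     *      rcu_read_lock();
     *      mas_for_each(&mas, entry, ULONG_MAX)
     *              pr_debug("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
     *      rcu_read_unlock();
     */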
580 
581 #ifdef CONFIG_DEBUG_MAPLE_TREE
582 enum mt_dump_format {
583         mt_dump_dec,
584         mt_dump_hex,
585 };
586 
587 extern atomic_t maple_tree_tests_run;
588 extern atomic_t maple_tree_tests_passed;
589 
590 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
591 void mas_dump(const struct ma_state *mas);
592 void mas_wr_dump(const struct ma_wr_state *wr_mas);
593 void mt_validate(struct maple_tree *mt);
594 void mt_cache_shrink(void);
595 #define MT_BUG_ON(__tree, __x) do {                                     \
596         atomic_inc(&maple_tree_tests_run);                              \
597         if (__x) {                                                      \
598                 pr_info("BUG at %s:%d (%u)\n",                          \
599                 __func__, __LINE__, __x);                               \
600                 mt_dump(__tree, mt_dump_hex);                           \
601                 pr_info("Pass: %u Run:%u\n",                            \
602                         atomic_read(&maple_tree_tests_passed),          \
603                         atomic_read(&maple_tree_tests_run));            \
604                 dump_stack();                                           \
605         } else {                                                        \
606                 atomic_inc(&maple_tree_tests_passed);                   \
607         }                                                               \
608 } while (0)
609 
610 #define MAS_BUG_ON(__mas, __x) do {                                     \
611         atomic_inc(&maple_tree_tests_run);                              \
612         if (__x) {                                                      \
613                 pr_info("BUG at %s:%d (%u)\n",                          \
614                 __func__, __LINE__, __x);                               \
615                 mas_dump(__mas);                                        \
616                 mt_dump((__mas)->tree, mt_dump_hex);                    \
617                 pr_info("Pass: %u Run:%u\n",                            \
618                         atomic_read(&maple_tree_tests_passed),          \
619                         atomic_read(&maple_tree_tests_run));            \
620                 dump_stack();                                           \
621         } else {                                                        \
622                 atomic_inc(&maple_tree_tests_passed);                   \
623         }                                                               \
624 } while (0)
625 
626 #define MAS_WR_BUG_ON(__wrmas, __x) do {                                \
627         atomic_inc(&maple_tree_tests_run);                              \
628         if (__x) {                                                      \
629                 pr_info("BUG at %s:%d (%u)\n",                          \
630                 __func__, __LINE__, __x);                               \
631                 mas_wr_dump(__wrmas);                                   \
632                 mas_dump((__wrmas)->mas);                               \
633                 mt_dump((__wrmas)->mas->tree, mt_dump_hex);             \
634                 pr_info("Pass: %u Run:%u\n",                            \
635                         atomic_read(&maple_tree_tests_passed),          \
636                         atomic_read(&maple_tree_tests_run));            \
637                 dump_stack();                                           \
638         } else {                                                        \
639                 atomic_inc(&maple_tree_tests_passed);                   \
640         }                                                               \
641 } while (0)
642 
643 #define MT_WARN_ON(__tree, __x)  ({                                     \
644         int ret = !!(__x);                                              \
645         atomic_inc(&maple_tree_tests_run);                              \
646         if (ret) {                                                      \
647                 pr_info("WARN at %s:%d (%u)\n",                         \
648                 __func__, __LINE__, __x);                               \
649                 mt_dump(__tree, mt_dump_hex);                           \
650                 pr_info("Pass: %u Run:%u\n",                            \
651                         atomic_read(&maple_tree_tests_passed),          \
652                         atomic_read(&maple_tree_tests_run));            \
653                 dump_stack();                                           \
654         } else {                                                        \
655                 atomic_inc(&maple_tree_tests_passed);                   \
656         }                                                               \
657         unlikely(ret);                                                  \
658 })
659 
660 #define MAS_WARN_ON(__mas, __x) ({                                      \
661         int ret = !!(__x);                                              \
662         atomic_inc(&maple_tree_tests_run);                              \
663         if (ret) {                                                      \
664                 pr_info("WARN at %s:%d (%u)\n",                         \
665                 __func__, __LINE__, __x);                               \
666                 mas_dump(__mas);                                        \
667                 mt_dump((__mas)->tree, mt_dump_hex);                    \
668                 pr_info("Pass: %u Run:%u\n",                            \
669                         atomic_read(&maple_tree_tests_passed),          \
670                         atomic_read(&maple_tree_tests_run));            \
671                 dump_stack();                                           \
672         } else {                                                        \
673                 atomic_inc(&maple_tree_tests_passed);                   \
674         }                                                               \
675         unlikely(ret);                                                  \
676 })
677 
678 #define MAS_WR_WARN_ON(__wrmas, __x) ({                                 \
679         int ret = !!(__x);                                              \
680         atomic_inc(&maple_tree_tests_run);                              \
681         if (ret) {                                                      \
682                 pr_info("WARN at %s:%d (%u)\n",                         \
683                 __func__, __LINE__, __x);                               \
684                 mas_wr_dump(__wrmas);                                   \
685                 mas_dump((__wrmas)->mas);                               \
686                 mt_dump((__wrmas)->mas->tree, mt_dump_hex);             \
687                 pr_info("Pass: %u Run:%u\n",                            \
688                         atomic_read(&maple_tree_tests_passed),          \
689                         atomic_read(&maple_tree_tests_run));            \
690                 dump_stack();                                           \
691         } else {                                                        \
692                 atomic_inc(&maple_tree_tests_passed);                   \
693         }                                                               \
694         unlikely(ret);                                                  \
695 })
696 #else
697 #define MT_BUG_ON(__tree, __x)          BUG_ON(__x)
698 #define MAS_BUG_ON(__mas, __x)          BUG_ON(__x)
699 #define MAS_WR_BUG_ON(__mas, __x)       BUG_ON(__x)
700 #define MT_WARN_ON(__tree, __x)         WARN_ON(__x)
701 #define MAS_WARN_ON(__mas, __x)         WARN_ON(__x)
702 #define MAS_WR_WARN_ON(__mas, __x)      WARN_ON(__x)
703 #endif /* CONFIG_DEBUG_MAPLE_TREE */
704 
705 /**
706  * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
707  * current location.
708  * @mas: Maple Tree operation state.
709  * @start: New start of range in the Maple Tree.
710  * @last: New end of range in the Maple Tree.
711  *
712  * set the internal maple state values to a sub-range.
713  * Please use mas_set_range() if you do not know where you are in the tree.
714  */
715 static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
716                 unsigned long last)
717 {
718         /* Ensure the range starts within the current slot */
719         MAS_WARN_ON(mas, mas_is_active(mas) &&
720                    (mas->index > start || mas->last < start));
721         mas->index = start;
722         mas->last = last;
723 }
724 
725 /**
726  * mas_set_range() - Set up Maple Tree operation state for a different index.
727  * @mas: Maple Tree operation state.
728  * @start: New start of range in the Maple Tree.
729  * @last: New end of range in the Maple Tree.
730  *
731  * Move the operation state to refer to a different range.  This will
732  * have the effect of starting a walk from the top; see mas_next()
733  * to move to an adjacent index.
734  */
735 static inline
736 void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
737 {
738         mas_reset(mas);
739         __mas_set_range(mas, start, last);
740 }
741 
742 /**
743  * mas_set() - Set up Maple Tree operation state for a different index.
744  * @mas: Maple Tree operation state.
745  * @index: New index into the Maple Tree.
746  *
747  * Move the operation state to refer to a different index.  This will
748  * have the effect of starting a walk from the top; see mas_next()
749  * to move to an adjacent index.
750  */
751 static inline void mas_set(struct ma_state *mas, unsigned long index)
752 {
753 
754         mas_set_range(mas, index, index);
755 }
756 
757 static inline bool mt_external_lock(const struct maple_tree *mt)
758 {
759         return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
760 }
761 
762 /**
763  * mt_init_flags() - Initialise an empty maple tree with flags.
764  * @mt: Maple Tree
765  * @flags: maple tree flags.
766  *
767  * If you need to initialise a Maple Tree with special flags (eg, an
768  * allocation tree), use this function.
769  *
770  * Context: Any context.
771  */
772 static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
773 {
774         mt->ma_flags = flags;
775         if (!mt_external_lock(mt))
776                 spin_lock_init(&mt->ma_lock);
777         rcu_assign_pointer(mt->ma_root, NULL);
778 }
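    /*
     * Example (editorial sketch, not part of the original header): setting up
     * a tree that tracks gaps and is protected by a caller-provided lock,
     * roughly the configuration the mm layer uses for the VMA tree:
     *
     *      mt_init_flags(&mm->mm_mt, MT_FLAGS_ALLOC_RANGE |
     *                    MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU);
     *      mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
     */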
779 
780 /**
781  * mt_init() - Initialise an empty maple tree.
782  * @mt: Maple Tree
783  *
784  * An empty Maple Tree.
785  *
786  * Context: Any context.
787  */
788 static inline void mt_init(struct maple_tree *mt)
789 {
790         mt_init_flags(mt, 0);
791 }
792 
793 static inline bool mt_in_rcu(struct maple_tree *mt)
794 {
795 #ifdef CONFIG_MAPLE_RCU_DISABLED
796         return false;
797 #endif
798         return mt->ma_flags & MT_FLAGS_USE_RCU;
799 }
800 
801 /**
802  * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
803  * @mt: The Maple Tree
804  */
805 static inline void mt_clear_in_rcu(struct maple_tree *mt)
806 {
807         if (!mt_in_rcu(mt))
808                 return;
809 
810         if (mt_external_lock(mt)) {
811                 WARN_ON(!mt_lock_is_held(mt));
812                 mt->ma_flags &= ~MT_FLAGS_USE_RCU;
813         } else {
814                 mtree_lock(mt);
815                 mt->ma_flags &= ~MT_FLAGS_USE_RCU;
816                 mtree_unlock(mt);
817         }
818 }
819 
820 /**
821  * mt_set_in_rcu() - Switch the tree to RCU safe mode.
822  * @mt: The Maple Tree
823  */
824 static inline void mt_set_in_rcu(struct maple_tree *mt)
825 {
826         if (mt_in_rcu(mt))
827                 return;
828 
829         if (mt_external_lock(mt)) {
830                 WARN_ON(!mt_lock_is_held(mt));
831                 mt->ma_flags |= MT_FLAGS_USE_RCU;
832         } else {
833                 mtree_lock(mt);
834                 mt->ma_flags |= MT_FLAGS_USE_RCU;
835                 mtree_unlock(mt);
836         }
837 }
838 
839 static inline unsigned int mt_height(const struct maple_tree *mt)
840 {
841         return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
842 }
843 
844 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
845 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
846                     unsigned long max);
847 void *mt_prev(struct maple_tree *mt, unsigned long index,  unsigned long min);
848 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
849 
850 /**
851  * mt_for_each - Iterate over each entry starting at index until max.
852  * @__tree: The Maple Tree
853  * @__entry: The current entry
854  * @__index: The index to start the search from. Subsequently used as iterator.
855  * @__max: The maximum limit for @index
856  *
 857  * This iterator skips all entries that resolve to a NULL pointer,
 858  * e.g. entries which have been reserved with XA_ZERO_ENTRY.
859  */
860 #define mt_for_each(__tree, __entry, __index, __max) \
861         for (__entry = mt_find(__tree, &(__index), __max); \
862                 __entry; __entry = mt_find_after(__tree, &(__index), __max))
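    /*
     * Example (editorial sketch, not part of the original header; "mt" is
     * illustrative):
     *
     *      void *entry;
     *      unsigned long index = 0;
     *
     *      mt_for_each(mt, entry, index, ULONG_MAX)
     *              pr_debug("found %p\n", entry);
     */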
863 
864 #endif /*_LINUX_MAPLE_TREE_H */
865 
