// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *          Matthew Wilcox <willy@infradead.org>
 * Copyright (c) 2023 ByteDance
 * Author: Peng Zhang <zhangpeng.00@bytedance.com>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets have
 * a slot, but the last offset has an implied pivot from the node above (or
 * ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 */
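
/*
 * Illustrative example (not part of the original file): after storing
 * entry A over the range 5-10 and entry B over 20-30 in an empty tree,
 * a leaf spanning 0-ULONG_MAX could be laid out as:
 *
 *  Slot contents -> | NULL | A  | NULL | B  | NULL |
 *  Pivots ->        |  4   | 10 |  19  | 30 | (implied ULONG_MAX)
 *
 * Slot 1 holds A for 5-10 because pivot 0 ends the leading NULL range at
 * 4 and pivot 1 ends A's range at 10; the trailing NULL runs from 31 to
 * the implied maximum.
 */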

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK              - Bulk insert mode
 * * MA_STATE_REBALANCE         - Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC          - Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK           1
#define MA_STATE_REBALANCE      2
#define MA_STATE_PREALLOC       4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
        [maple_dense]           = MAPLE_NODE_SLOTS,
        [maple_leaf_64]         = ULONG_MAX,
        [maple_range_64]        = ULONG_MAX,
        [maple_arange_64]       = ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
        [maple_dense]           = MAPLE_NODE_SLOTS,
        [maple_leaf_64]         = MAPLE_RANGE64_SLOTS,
        [maple_range_64]        = MAPLE_RANGE64_SLOTS,
        [maple_arange_64]       = MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
        [maple_dense]           = 0,
        [maple_leaf_64]         = MAPLE_RANGE64_SLOTS - 1,
        [maple_range_64]        = MAPLE_RANGE64_SLOTS - 1,
        [maple_arange_64]       = MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
        [maple_dense]           = MAPLE_NODE_SLOTS / 2,
        [maple_leaf_64]         = (MAPLE_RANGE64_SLOTS / 2) - 2,
        [maple_range_64]        = (MAPLE_RANGE64_SLOTS / 2) - 2,
        [maple_arange_64]       = (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS    (MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS     (MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
        struct maple_pnode *parent;
        unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
        union {
                struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
                struct {
                        unsigned long padding[MAPLE_BIG_NODE_GAPS];
                        unsigned long gap[MAPLE_BIG_NODE_GAPS];
                };
        };
        unsigned char b_end;
        enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
        struct ma_state *orig_l;        /* Original left side of subtree */
        struct ma_state *orig_r;        /* Original right side of subtree */
        struct ma_state *l;             /* New left side of subtree */
        struct ma_state *m;             /* New middle of subtree (rare) */
        struct ma_state *r;             /* New right side of subtree */
        struct ma_topiary *free;        /* nodes to be freed */
        struct ma_topiary *destroy;     /* Nodes to be destroyed (walked and freed) */
        struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
        return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
        return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_one(struct maple_node *node)
{
        kmem_cache_free(maple_node_cache, node);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
        kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
        struct maple_node *node = container_of(head, struct maple_node, rcu);

        kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
        WARN_ON(node->parent != ma_parent_ptr(node));
        call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
        unsigned int new_flags = mas->tree->ma_flags;

        new_flags &= ~MT_FLAGS_HEIGHT_MASK;
        MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
        new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
        mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
        return mt_height(mas->tree);
}

static inline unsigned int mt_attr(struct maple_tree *mt)
{
        return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
}

static __always_inline enum maple_type mte_node_type(
                const struct maple_enode *entry)
{
        return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
                MAPLE_NODE_TYPE_MASK;
}

static __always_inline bool ma_is_dense(const enum maple_type type)
{
        return type < maple_leaf_64;
}

static __always_inline bool ma_is_leaf(const enum maple_type type)
{
        return type < maple_range_64;
}

static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
        return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static __always_inline bool mt_is_reserved(const void *entry)
{
        return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
                xa_is_internal(entry);
}

static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
        mas->node = MA_ERROR(err);
        mas->status = ma_error;
}

static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
        return mas->status == ma_root;
}

static __always_inline bool mas_is_start(const struct ma_state *mas)
{
        return mas->status == ma_start;
}

static __always_inline bool mas_is_none(const struct ma_state *mas)
{
        return mas->status == ma_none;
}

static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
        return mas->status == ma_pause;
}

static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
        return mas->status == ma_overflow;
}

static inline bool mas_is_underflow(struct ma_state *mas)
{
        return mas->status == ma_underflow;
}

static __always_inline struct maple_node *mte_to_node(
                const struct maple_enode *entry)
{
        return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
        return (struct maple_topiary *)
                ((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
        return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
        mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
        smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE                 0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT          0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL                0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
                                             enum maple_type type)
{
        return (void *)((unsigned long)node |
                        (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
        return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
        return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
        return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
        return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
        return (unsigned long)node & MAPLE_ENODE_NULL;
}

static __always_inline bool ma_is_root(struct maple_node *node)
{
        return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static __always_inline bool mte_is_root(const struct maple_enode *node)
{
        return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
        return !mas->min && mas->max == ULONG_MAX;
}

static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
        return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0b??1 = Root
 *  0b?00 = 16 bit nodes
 *  0b010 = 32 bit nodes
 *  0b110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */
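
/*
 * Worked example (illustrative, not part of the original file): a node at
 * the 256B-aligned address P stored in slot 4 of a range64 parent encodes
 * its parent pointer as
 *
 *      P | (4 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64 == P | 0x26
 *
 * Decoding: bit 1 (MAPLE_PARENT_NOT_RANGE16) is set, so the slot is
 * (0x26 & MAPLE_PARENT_16B_SLOT_MASK) >> MAPLE_PARENT_SLOT_SHIFT == 4, and
 * masking off the low byte (~MAPLE_NODE_MASK) recovers P.
 */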

#define MAPLE_PARENT_ROOT               0x01

#define MAPLE_PARENT_SLOT_SHIFT         0x03
#define MAPLE_PARENT_SLOT_MASK          0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT     0x02
#define MAPLE_PARENT_16B_SLOT_MASK      0xFC

#define MAPLE_PARENT_RANGE64            0x06
#define MAPLE_PARENT_RANGE32            0x04
#define MAPLE_PARENT_NOT_RANGE16        0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
        /* Note bit 1 == 0 means 16B */
        if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
                return MAPLE_PARENT_SLOT_SHIFT;

        return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
        /* Note bit 1 == 0 means 16B */
        if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
                return MAPLE_PARENT_SLOT_MASK;

        return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
        unsigned long p_type;

        p_type = (unsigned long)mte_to_node(enode)->parent;
        if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
                return 0;

        p_type &= MAPLE_NODE_MASK;
        p_type &= ~mte_parent_slot_mask(p_type);
        switch (p_type) {
        case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
                if (mt_is_alloc(mas->tree))
                        return maple_arange_64;
                return maple_range_64;
        }

        return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
                    const struct maple_enode *parent, unsigned char slot)
{
        unsigned long val = (unsigned long)parent;
        unsigned long shift;
        unsigned long type;
        enum maple_type p_type = mte_node_type(parent);

        MAS_BUG_ON(mas, p_type == maple_dense);
        MAS_BUG_ON(mas, p_type == maple_leaf_64);

        switch (p_type) {
        case maple_range_64:
        case maple_arange_64:
                shift = MAPLE_PARENT_SLOT_SHIFT;
                type = MAPLE_PARENT_RANGE64;
                break;
        default:
        case maple_dense:
        case maple_leaf_64:
                shift = type = 0;
                break;
        }

        val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
        val |= (slot << shift) | type;
        mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static __always_inline
unsigned int mte_parent_slot(const struct maple_enode *enode)
{
        unsigned long val = (unsigned long)mte_to_node(enode)->parent;

        if (unlikely(val & MA_ROOT_PARENT))
                return 0;

        /*
         * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
         * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
         */
        return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static __always_inline
struct maple_node *mte_parent(const struct maple_enode *enode)
{
        return (void *)((unsigned long)
                        (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool ma_dead_node(const struct maple_node *node)
{
        struct maple_node *parent;

        /* Do not reorder reads from the node prior to the parent check */
        smp_rmb();
        parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
        return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
        struct maple_node *parent, *node;

        node = mte_to_node(enode);
        /* Do not reorder reads from the node prior to the parent check */
        smp_rmb();
        parent = mte_parent(enode);
        return (parent == node);
}
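
/*
 * Illustrative sketch (not part of the original file) of how the two
 * sides of the dead-node protocol pair up:
 *
 *      writer                          reader
 *      ------                          ------
 *      mte_set_node_dead(old)          read fields of the node
 *        (parent = self, smp_wmb())    if (ma_dead_node(node))
 *      publish replacement node                restart the walk
 *
 * The smp_rmb() in ma_dead_node()/mte_dead_node() orders the reader's
 * earlier loads before the parent check, pairing with the smp_wmb() in
 * mte_set_node_dead().
 */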

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
        if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
                return 0;

        return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
        if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
                if (!count)
                        mas->alloc = NULL;
                else
                        mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
                return;
        }

        mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
        if ((unsigned long)mas->alloc & 0x1)
                return (unsigned long)(mas->alloc) >> 1;
        else if (mas->alloc)
                return mas->alloc->request_count;
        return 0;
}
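
/*
 * Illustrative sketch (not part of the original file): bit 0 of mas->alloc
 * distinguishes an encoded request count from a pointer to the first
 * allocated node.  The helper name is hypothetical and assumes a maple
 * state with no allocations.
 */
static inline bool mas_example_alloc_encoding(struct ma_state *mas)
{
        mas_set_alloc_req(mas, 3);      /* mas->alloc becomes (3 << 1) | 1 */

        /* No node has been allocated, so the total remains 0 */
        return mas_allocated(mas) == 0 && mas_alloc_req(mas) == 3;
}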

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
                                           enum maple_type type)
{
        switch (type) {
        case maple_arange_64:
                return node->ma64.pivot;
        case maple_range_64:
        case maple_leaf_64:
                return node->mr64.pivot;
        case maple_dense:
                return NULL;
        }
        return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
                                     enum maple_type type)
{
        switch (type) {
        case maple_arange_64:
                return node->ma64.gap;
        case maple_range_64:
        case maple_leaf_64:
        case maple_dense:
                return NULL;
        }
        return NULL;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
               unsigned char piv, enum maple_type type)
{
        if (piv >= mt_pivots[type])
                return mas->max;

        return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
        if (likely(offset))
                return pivots[offset - 1] + 1;

        return mas->min;
}
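
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above combine to give the inclusive range covered by an offset.  The
 * helper name is hypothetical.
 */
static inline void mas_example_range(struct ma_state *mas,
                unsigned long *pivots, unsigned char offset,
                enum maple_type type, unsigned long *first,
                unsigned long *last)
{
        /* pivots[offset - 1] + 1, or mas->min at offset 0 */
        *first = mas_safe_min(mas, pivots, offset);
        /* pivots[offset], or mas->max past the end of the pivot array */
        *last = mas_safe_pivot(mas, pivots, offset, type);
}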

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
                                unsigned long val)
{
        struct maple_node *node = mte_to_node(mn);
        enum maple_type type = mte_node_type(mn);

        BUG_ON(piv >= mt_pivots[type]);
        switch (type) {
        case maple_range_64:
        case maple_leaf_64:
                node->mr64.pivot[piv] = val;
                break;
        case maple_arange_64:
                node->ma64.pivot[piv] = val;
                break;
        case maple_dense:
                break;
        }
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
        switch (mt) {
        case maple_arange_64:
                return mn->ma64.slot;
        case maple_range_64:
        case maple_leaf_64:
                return mn->mr64.slot;
        case maple_dense:
                return mn->slot;
        }

        return NULL;
}

static inline bool mt_write_locked(const struct maple_tree *mt)
{
        return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
                lockdep_is_held(&mt->ma_lock);
}

static __always_inline bool mt_locked(const struct maple_tree *mt)
{
        return mt_external_lock(mt) ? mt_lock_is_held(mt) :
                lockdep_is_held(&mt->ma_lock);
}

static __always_inline void *mt_slot(const struct maple_tree *mt,
                void __rcu **slots, unsigned char offset)
{
        return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static __always_inline void *mt_slot_locked(struct maple_tree *mt,
                void __rcu **slots, unsigned char offset)
{
        return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static __always_inline void *mas_slot_locked(struct ma_state *mas,
                void __rcu **slots, unsigned char offset)
{
        return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
                unsigned char offset)
{
        return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static __always_inline void *mas_root(struct ma_state *mas)
{
        return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
        return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
        return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
                                             enum maple_type mt)
{
        switch (mt) {
        case maple_arange_64:
                return &mn->ma64.meta;
        default:
                return &mn->mr64.meta;
        }
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
                               unsigned char offset, unsigned char end)
{
        struct maple_metadata *meta = ma_meta(mn, mt);

        meta->gap = offset;
        meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
                                  enum maple_type type)
{
        struct maple_metadata *meta;
        unsigned long *pivots;
        void __rcu **slots;
        void *next;

        switch (type) {
        case maple_range_64:
                pivots = mn->mr64.pivot;
                if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
                        slots = mn->mr64.slot;
                        next = mt_slot_locked(mt, slots,
                                              MAPLE_RANGE64_SLOTS - 1);
                        if (unlikely((mte_to_node(next) &&
                                      mte_node_type(next))))
                                return; /* no metadata, could be node */
                }
                fallthrough;
        case maple_arange_64:
                meta = ma_meta(mn, type);
                break;
        default:
                return;
        }

        meta->gap = 0;
        meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
                                        enum maple_type mt)
{
        struct maple_metadata *meta = ma_meta(mn, mt);

        return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
        return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
                                   unsigned char offset)
{
        struct maple_metadata *meta = ma_meta(mn, mt);

        meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
                           struct maple_enode *dead_enode)
{
        mte_set_node_dead(dead_enode);
        mte_to_mat(dead_enode)->next = NULL;
        if (!mat->tail) {
                mat->tail = mat->head = dead_enode;
                return;
        }

        mte_to_mat(mat->tail)->next = dead_enode;
        mat->tail = dead_enode;
}

static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
                            bool free);
/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
        struct maple_enode *next;
        struct maple_node *node;
        bool in_rcu = mt_in_rcu(mas->tree);

        while (mat->head) {
                next = mte_to_mat(mat->head)->next;
                node = mte_to_node(mat->head);
                mt_destroy_walk(mat->head, mas->tree, !in_rcu);
                if (in_rcu)
                        call_rcu(&node->rcu, mt_free_walk);
                mat->head = next;
        }
}
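
/*
 * Illustrative sketch (not part of the original file): collecting and
 * disposing of dead nodes with a topiary list.  The helper name is
 * hypothetical; @dead is assumed to be a node already unlinked from the
 * tree.
 */
static inline void mas_example_topiary(struct ma_state *mas,
                struct maple_enode *dead)
{
        struct ma_topiary mat = {
                .head = NULL,
                .tail = NULL,
                .mtree = mas->tree,
        };

        mat_add(&mat, dead);            /* mark dead and append */
        mas_mat_destroy(mas, &mat);     /* walk and free (RCU-deferred if needed) */
}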

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
        enum maple_type type;
        unsigned long *pivots;
        struct maple_node *node;
        void __rcu **slots;

        node = mas_mn(mas);
        type = mte_node_type(mas->node);
        pivots = ma_pivots(node, type);
        slots = ma_slots(node, type);

        if (mas->offset)
                mas->min = pivots[mas->offset - 1] + 1;
        mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
        mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
                                 unsigned char gap, unsigned long val)
{
        switch (mte_node_type(mn)) {
        default:
                break;
        case maple_arange_64:
                mte_to_node(mn)->ma64.gap[gap] = val;
                break;
        }
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
        struct maple_enode *p_enode; /* parent enode. */
        struct maple_enode *a_enode; /* ancestor enode. */
        struct maple_node *a_node; /* ancestor node. */
        struct maple_node *p_node; /* parent node. */
        unsigned char a_slot;
        enum maple_type a_type;
        unsigned long min, max;
        unsigned long *pivots;
        bool set_max = false, set_min = false;

        a_node = mas_mn(mas);
        if (ma_is_root(a_node)) {
                mas->offset = 0;
                return 0;
        }

        p_node = mte_parent(mas->node);
        if (unlikely(a_node == p_node))
                return 1;

        a_type = mas_parent_type(mas, mas->node);
        mas->offset = mte_parent_slot(mas->node);
        a_enode = mt_mk_node(p_node, a_type);

        /* Check to make sure all parent information is still accurate */
        if (p_node != mte_parent(mas->node))
                return 1;

        mas->node = a_enode;

        if (mte_is_root(a_enode)) {
                mas->max = ULONG_MAX;
                mas->min = 0;
                return 0;
        }

        min = 0;
        max = ULONG_MAX;
        if (!mas->offset) {
                min = mas->min;
                set_min = true;
        }

        if (mas->max == ULONG_MAX)
                set_max = true;

        do {
                p_enode = a_enode;
                a_type = mas_parent_type(mas, p_enode);
                a_node = mte_parent(p_enode);
                a_slot = mte_parent_slot(p_enode);
                a_enode = mt_mk_node(a_node, a_type);
                pivots = ma_pivots(a_node, a_type);

                if (unlikely(ma_dead_node(a_node)))
                        return 1;

                if (!set_min && a_slot) {
                        set_min = true;
                        min = pivots[a_slot - 1] + 1;
                }

                if (!set_max && a_slot < mt_pivots[a_type]) {
                        set_max = true;
                        max = pivots[a_slot];
                }

                if (unlikely(ma_dead_node(a_node)))
                        return 1;

                if (unlikely(ma_is_root(a_node)))
                        break;

        } while (!set_min || !set_max);

        mas->max = max;
        mas->min = min;
        return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
        struct maple_alloc *ret, *node = mas->alloc;
        unsigned long total = mas_allocated(mas);
        unsigned int req = mas_alloc_req(mas);

        /* nothing or a request pending. */
        if (WARN_ON(!total))
                return NULL;

        if (total == 1) {
                /* single allocation in this ma_state */
                mas->alloc = NULL;
                ret = node;
                goto single_node;
        }

        if (node->node_count == 1) {
                /* Single allocation in this node. */
                mas->alloc = node->slot[0];
                mas->alloc->total = node->total - 1;
                ret = node;
                goto new_head;
        }
        node->total--;
        ret = node->slot[--node->node_count];
        node->slot[node->node_count] = NULL;

single_node:
new_head:
        if (req) {
                req++;
                mas_set_alloc_req(mas, req);
        }

        memset(ret, 0, sizeof(*ret));
        return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
        struct maple_alloc *reuse = (struct maple_alloc *)used;
        struct maple_alloc *head = mas->alloc;
        unsigned long count;
        unsigned int requested = mas_alloc_req(mas);

        count = mas_allocated(mas);

        reuse->request_count = 0;
        reuse->node_count = 0;
        if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
                head->slot[head->node_count++] = reuse;
                head->total++;
                goto done;
        }

        reuse->total = 1;
        if ((head) && !((unsigned long)head & 0x1)) {
                reuse->slot[0] = head;
                reuse->node_count = 1;
                reuse->total += head->total;
        }

        mas->alloc = reuse;
done:
        if (requested > 1)
                mas_set_alloc_req(mas, requested - 1);
}
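
/*
 * Illustrative sketch (not part of the original file): a node taken with
 * mas_pop_node() can be returned with mas_push_node() if the operation
 * ends up not needing it.  The helper name is hypothetical and assumes
 * mas_allocated(mas) is non-zero.
 */
static inline void mas_example_pop_push(struct ma_state *mas)
{
        struct maple_node *node = mas_pop_node(mas);    /* take one node */

        if (node)
                mas_push_node(mas, node);       /* hand it back for reuse */
}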

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
        struct maple_alloc *node;
        unsigned long allocated = mas_allocated(mas);
        unsigned int requested = mas_alloc_req(mas);
        unsigned int count;
        void **slots = NULL;
        unsigned int max_req = 0;

        if (!requested)
                return;

        mas_set_alloc_req(mas, 0);
        if (mas->mas_flags & MA_STATE_PREALLOC) {
                if (allocated)
                        return;
                WARN_ON(!allocated);
        }

        if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
                node = (struct maple_alloc *)mt_alloc_one(gfp);
                if (!node)
                        goto nomem_one;

                if (allocated) {
                        node->slot[0] = mas->alloc;
                        node->node_count = 1;
                } else {
                        node->node_count = 0;
                }

                mas->alloc = node;
                node->total = ++allocated;
                requested--;
        }

        node = mas->alloc;
        node->request_count = 0;
        while (requested) {
                max_req = MAPLE_ALLOC_SLOTS - node->node_count;
                slots = (void **)&node->slot[node->node_count];
                max_req = min(requested, max_req);
                count = mt_alloc_bulk(gfp, max_req, slots);
                if (!count)
                        goto nomem_bulk;

                if (node->node_count == 0) {
                        node->slot[0]->node_count = 0;
                        node->slot[0]->request_count = 0;
                }

                node->node_count += count;
                allocated += count;
                node = node->slot[0];
                requested -= count;
        }
        mas->alloc->total = allocated;
        return;

nomem_bulk:
        /* Clean up potential freed allocations on bulk failure */
        memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
        mas_set_alloc_req(mas, requested);
        if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
                mas->alloc->total = allocated;
        mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
        struct maple_node *tmp = mte_to_node(used);

        if (mt_in_rcu(mas->tree))
                ma_free_rcu(tmp);
        else
                mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
        unsigned long allocated = mas_allocated(mas);

        if (allocated < count) {
                mas_set_alloc_req(mas, count - allocated);
                mas_alloc_nodes(mas, gfp);
        }
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
        return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
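
/*
 * Illustrative sketch (not part of the original file): a write path
 * typically reserves nodes up front and propagates -ENOMEM so the caller
 * can retry with a blocking GFP mask.  The helper name is hypothetical.
 */
static inline int mas_example_reserve(struct ma_state *mas, int count)
{
        mas_node_count(mas, count);     /* GFP_NOWAIT | __GFP_NOWARN */
        if (mas_is_err(mas))
                return xa_err(mas->node);       /* likely -ENOMEM */

        return 0;
}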

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->status == ma_start, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not ma_start, return NULL.
 * - If it's an empty tree:     NULL & mas->status == ma_none
 * - If it's a single entry:    The entry & mas->status == ma_root
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
        if (likely(mas_is_start(mas))) {
                struct maple_enode *root;

                mas->min = 0;
                mas->max = ULONG_MAX;

retry:
                mas->depth = 0;
                root = mas_root(mas);
                /* Tree with nodes */
                if (likely(xa_is_node(root))) {
                        mas->depth = 1;
                        mas->status = ma_active;
                        mas->node = mte_safe_root(root);
                        mas->offset = 0;
                        if (mte_dead_node(mas->node))
                                goto retry;

                        return NULL;
                }

                /* empty tree */
                if (unlikely(!root)) {
                        mas->node = NULL;
                        mas->status = ma_none;
                        mas->offset = MAPLE_NODE_SLOTS;
                        return NULL;
                }

                /* Single entry tree */
                mas->status = ma_root;
                mas->offset = MAPLE_NODE_SLOTS;

                if (mas->index > 0)
                        return NULL;

                return root;
        }

        return NULL;
}
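
/*
 * Illustrative sketch (not part of the original file): interpreting the
 * three mas_start() outcomes at the top of a walk.  The helper name is
 * hypothetical.
 */
static inline void *mas_example_begin(struct ma_state *mas)
{
        void *entry = mas_start(mas);

        if (mas_is_none(mas))   /* empty tree */
                return NULL;

        if (mas_is_ptr(mas))    /* single entry stored at index 0 */
                return entry;   /* NULL unless mas->index == 0 */

        /* Otherwise mas->node is the root node and the walk descends */
        return NULL;
}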

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static __always_inline unsigned char ma_data_end(struct maple_node *node,
                enum maple_type type, unsigned long *pivots, unsigned long max)
{
        unsigned char offset;

        if (!pivots)
                return 0;

        if (type == maple_arange_64)
                return ma_meta_end(node, type);

        offset = mt_pivots[type] - 1;
        if (likely(!pivots[offset]))
                return ma_meta_end(node, type);

        if (likely(pivots[offset] == max))
                return offset;

        return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
        enum maple_type type;
        struct maple_node *node;
        unsigned char offset;
        unsigned long *pivots;

        type = mte_node_type(mas->node);
        node = mas_mn(mas);
        if (type == maple_arange_64)
                return ma_meta_end(node, type);

        pivots = ma_pivots(node, type);
        if (unlikely(ma_dead_node(node)))
                return 0;

        offset = mt_pivots[type] - 1;
        if (likely(!pivots[offset]))
                return ma_meta_end(node, type);

        if (likely(pivots[offset] == mas->max))
                return offset;

        return mt_pivots[type];
}
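
/*
 * Example (illustrative, not part of the original file): for a range64
 * node with 16 slots and 15 pivots (the 64-bit layout), the checks above
 * distinguish three cases from the last pivot, pivots[14]:
 *   pivots[14] == 0            -> node not full, end comes from the metadata
 *   pivots[14] == mas->max     -> data ends at offset 14
 *   otherwise                  -> every slot is used, end is slot 15
 */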
1462 
1463 /*
1464  * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1465  * @mas - the maple state
1466  *
1467  * Return: The maximum gap in the leaf.
1468  */
1469 static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1470 {
1471         enum maple_type mt;
1472         unsigned long pstart, gap, max_gap;
1473         struct maple_node *mn;
1474         unsigned long *pivots;
1475         void __rcu **slots;
1476         unsigned char i;
1477         unsigned char max_piv;
1478 
1479         mt = mte_node_type(mas->node);
1480         mn = mas_mn(mas);
1481         slots = ma_slots(mn, mt);
1482         max_gap = 0;
1483         if (unlikely(ma_is_dense(mt))) {
1484                 gap = 0;
1485                 for (i = 0; i < mt_slots[mt]; i++) {
1486                         if (slots[i]) {
1487                                 if (gap > max_gap)
1488                                         max_gap = gap;
1489                                 gap = 0;
1490                         } else {
1491                                 gap++;
1492                         }
1493                 }
1494                 if (gap > max_gap)
1495                         max_gap = gap;
1496                 return max_gap;
1497         }
1498 
1499         /*
1500          * Check the first implied pivot optimizes the loop below and slot 1 may
1501          * be skipped if there is a gap in slot 0.
1502          */
1503         pivots = ma_pivots(mn, mt);
1504         if (likely(!slots[0])) {
1505                 max_gap = pivots[0] - mas->min + 1;
1506                 i = 2;
1507         } else {
1508                 i = 1;
1509         }
1510 
1511         /* reduce max_piv as the special case is checked before the loop */
1512         max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1513         /*
1514          * Check end implied pivot which can only be a gap on the right most
1515          * node.
1516          */
1517         if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1518                 gap = ULONG_MAX - pivots[max_piv];
1519                 if (gap > max_gap)
1520                         max_gap = gap;
1521 
1522                 if (max_gap > pivots[max_piv] - mas->min)
1523                         return max_gap;
1524         }
1525 
1526         for (; i <= max_piv; i++) {
1527                 /* data == no gap. */
1528                 if (likely(slots[i]))
1529                         continue;
1530 
1531                 pstart = pivots[i - 1];
1532                 gap = pivots[i] - pstart;
1533                 if (gap > max_gap)
1534                         max_gap = gap;
1535 
1536                 /* There cannot be two gaps in a row. */
1537                 i++;
1538         }
1539         return max_gap;
1540 }
1541 
1542 /*
1543  * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1544  * @node: The maple node
1545  * @gaps: The pointer to the gaps
1546  * @mt: The maple node type
1547  * @*off: Pointer to store the offset location of the gap.
1548  *
1549  * Uses the metadata data end to scan backwards across set gaps.
1550  *
1551  * Return: The maximum gap value
1552  */
1553 static inline unsigned long
1554 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1555             unsigned char *off)
1556 {
1557         unsigned char offset, i;
1558         unsigned long max_gap = 0;
1559 
1560         i = offset = ma_meta_end(node, mt);
1561         do {
1562                 if (gaps[i] > max_gap) {
1563                         max_gap = gaps[i];
1564                         offset = i;
1565                 }
1566         } while (i--);
1567 
1568         *off = offset;
1569         return max_gap;
1570 }
1571 
1572 /*
1573  * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1574  * @mas: The maple state.
1575  *
1576  * Return: The gap value.
1577  */
1578 static inline unsigned long mas_max_gap(struct ma_state *mas)
1579 {
1580         unsigned long *gaps;
1581         unsigned char offset;
1582         enum maple_type mt;
1583         struct maple_node *node;
1584 
1585         mt = mte_node_type(mas->node);
1586         if (ma_is_leaf(mt))
1587                 return mas_leaf_max_gap(mas);
1588 
1589         node = mas_mn(mas);
1590         MAS_BUG_ON(mas, mt != maple_arange_64);
1591         offset = ma_meta_gap(node);
1592         gaps = ma_gaps(node, mt);
1593         return gaps[offset];
1594 }
1595 
1596 /*
1597  * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1598  * @mas: The maple state
1599  * @offset: The gap offset in the parent to set
1600  * @new: The new gap value.
1601  *
1602  * Set the parent gap then continue to set the gap upwards, using the metadata
1603  * of the parent to see if it is necessary to check the node above.
1604  */
1605 static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1606                 unsigned long new)
1607 {
1608         unsigned long meta_gap = 0;
1609         struct maple_node *pnode;
1610         struct maple_enode *penode;
1611         unsigned long *pgaps;
1612         unsigned char meta_offset;
1613         enum maple_type pmt;
1614 
1615         pnode = mte_parent(mas->node);
1616         pmt = mas_parent_type(mas, mas->node);
1617         penode = mt_mk_node(pnode, pmt);
1618         pgaps = ma_gaps(pnode, pmt);
1619 
1620 ascend:
1621         MAS_BUG_ON(mas, pmt != maple_arange_64);
1622         meta_offset = ma_meta_gap(pnode);
1623         meta_gap = pgaps[meta_offset];
1624 
1625         pgaps[offset] = new;
1626 
1627         if (meta_gap == new)
1628                 return;
1629 
1630         if (offset != meta_offset) {
1631                 if (meta_gap > new)
1632                         return;
1633 
1634                 ma_set_meta_gap(pnode, pmt, offset);
1635         } else if (new < meta_gap) {
1636                 new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1637                 ma_set_meta_gap(pnode, pmt, meta_offset);
1638         }
1639 
1640         if (ma_is_root(pnode))
1641                 return;
1642 
1643         /* Go to the parent node. */
1644         pnode = mte_parent(penode);
1645         pmt = mas_parent_type(mas, penode);
1646         pgaps = ma_gaps(pnode, pmt);
1647         offset = mte_parent_slot(penode);
1648         penode = mt_mk_node(pnode, pmt);
1649         goto ascend;
1650 }
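
     /*
      * Illustrative example (hypothetical values): suppose a child's largest
      * gap shrinks from 100 to 60 and its parent records gaps {20, 100, 40}
      * with the metadata pointing at offset 1.  Writing 60 into offset 1
      * forces a rescan; ma_max_gap() returns 60 at offset 1 and the walk
      * continues with 60 into the grandparent.  Had the child been at a
      * different offset with a new gap below the recorded maximum of 100,
      * the walk would have stopped immediately.
      */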
1651 
1652 /*
1653  * mas_update_gap() - Update a node's gaps and propagate up if necessary.
1654  * @mas: the maple state.
1655  */
1656 static inline void mas_update_gap(struct ma_state *mas)
1657 {
1658         unsigned char pslot;
1659         unsigned long p_gap;
1660         unsigned long max_gap;
1661 
1662         if (!mt_is_alloc(mas->tree))
1663                 return;
1664 
1665         if (mte_is_root(mas->node))
1666                 return;
1667 
1668         max_gap = mas_max_gap(mas);
1669 
1670         pslot = mte_parent_slot(mas->node);
1671         p_gap = ma_gaps(mte_parent(mas->node),
1672                         mas_parent_type(mas, mas->node))[pslot];
1673 
1674         if (p_gap != max_gap)
1675                 mas_parent_gap(mas, pslot, max_gap);
1676 }
1677 
1678 /*
1679  * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1680  * @parent with the slot encoded.
1681  * @mas: the maple state (for the tree)
1682  * @parent: the maple encoded node containing the children.
1683  */
1684 static inline void mas_adopt_children(struct ma_state *mas,
1685                 struct maple_enode *parent)
1686 {
1687         enum maple_type type = mte_node_type(parent);
1688         struct maple_node *node = mte_to_node(parent);
1689         void __rcu **slots = ma_slots(node, type);
1690         unsigned long *pivots = ma_pivots(node, type);
1691         struct maple_enode *child;
1692         unsigned char offset;
1693 
1694         offset = ma_data_end(node, type, pivots, mas->max);
1695         do {
1696                 child = mas_slot_locked(mas, slots, offset);
1697                 mas_set_parent(mas, child, parent, offset);
1698         } while (offset--);
1699 }
1700 
1701 /*
1702  * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
1703  * node as dead.
1704  * @mas: the maple state with the new node
1705  * @old_enode: The old maple encoded node to replace.
1706  */
1707 static inline void mas_put_in_tree(struct ma_state *mas,
1708                 struct maple_enode *old_enode)
1709         __must_hold(mas->tree->ma_lock)
1710 {
1711         unsigned char offset;
1712         void __rcu **slots;
1713 
1714         if (mte_is_root(mas->node)) {
1715                 mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
1716                 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1717                 mas_set_height(mas);
1718         } else {
1719 
1720                 offset = mte_parent_slot(mas->node);
1721                 slots = ma_slots(mte_parent(mas->node),
1722                                  mas_parent_type(mas, mas->node));
1723                 rcu_assign_pointer(slots[offset], mas->node);
1724         }
1725 
1726         mte_set_node_dead(old_enode);
1727 }
1728 
1729 /*
1730  * mas_replace_node() - Replace a node by putting it in the tree, marking it
1731  * dead, and freeing it.  Uses the parent encoding to locate the maple node
1732  * in the tree.
1733  * @mas: the ma_state with @mas->node pointing to the new node.
1734  * @old_enode: The old maple encoded node.
1735  */
1736 static inline void mas_replace_node(struct ma_state *mas,
1737                 struct maple_enode *old_enode)
1738         __must_hold(mas->tree->ma_lock)
1739 {
1740         mas_put_in_tree(mas, old_enode);
1741         mas_free(mas, old_enode);
1742 }
1743 
1744 /*
1745  * mas_find_child() - Find a child who has the parent @mas->node.
1746  * @mas: the maple state with the parent.
1747  * @child: the maple state to store the child.
      *
      * Return: true if a child was found, false otherwise.
1748  */
1749 static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
1750         __must_hold(mas->tree->ma_lock)
1751 {
1752         enum maple_type mt;
1753         unsigned char offset;
1754         unsigned char end;
1755         unsigned long *pivots;
1756         struct maple_enode *entry;
1757         struct maple_node *node;
1758         void __rcu **slots;
1759 
1760         mt = mte_node_type(mas->node);
1761         node = mas_mn(mas);
1762         slots = ma_slots(node, mt);
1763         pivots = ma_pivots(node, mt);
1764         end = ma_data_end(node, mt, pivots, mas->max);
1765         for (offset = mas->offset; offset <= end; offset++) {
1766                 entry = mas_slot_locked(mas, slots, offset);
1767                 if (mte_parent(entry) == node) {
1768                         *child = *mas;
1769                         mas->offset = offset + 1;
1770                         child->offset = offset;
1771                         mas_descend(child);
1772                         child->offset = 0;
1773                         return true;
1774                 }
1775         }
1776         return false;
1777 }
1778 
1779 /*
1780  * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
1781  * old data or set b_node->b_end.
1782  * @b_node: the maple_big_node
1783  * @shift: the shift count
1784  */
1785 static inline void mab_shift_right(struct maple_big_node *b_node,
1786                                  unsigned char shift)
1787 {
1788         unsigned long size = b_node->b_end * sizeof(unsigned long);
1789 
1790         memmove(b_node->pivot + shift, b_node->pivot, size);
1791         memmove(b_node->slot + shift, b_node->slot, size);
1792         if (b_node->type == maple_arange_64)
1793                 memmove(b_node->gap + shift, b_node->gap, size);
1794 }
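
     /*
      * For illustration: with b_end = 3, pivots {5, 10, 15} and slots
      * {A, B, C}, a shift of 2 yields pivots {5, 10, 5, 10, 15} and slots
      * {A, B, A, B, C}.  The first @shift entries are stale copies which the
      * caller is expected to overwrite.
      */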
1795 
1796 /*
1797  * mab_middle_node() - Check if a middle node is needed (unlikely)
1798  * @b_node: the maple_big_node that contains the data.
1800  * @split: the potential split location
1801  * @slot_count: the size that can be stored in a single node being considered.
1802  *
1803  * Return: true if a middle node is required.
1804  */
1805 static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1806                                    unsigned char slot_count)
1807 {
1808         unsigned char size = b_node->b_end;
1809 
1810         if (size >= 2 * slot_count)
1811                 return true;
1812 
1813         if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1814                 return true;
1815 
1816         return false;
1817 }
1818 
1819 /*
1820  * mab_no_null_split() - ensure the split doesn't fall on a NULL
1821  * @b_node: the maple_big_node with the data
1822  * @split: the suggested split location
1823  * @slot_count: the number of slots in the node being considered.
1824  *
1825  * Return: the split location.
1826  */
1827 static inline int mab_no_null_split(struct maple_big_node *b_node,
1828                                     unsigned char split, unsigned char slot_count)
1829 {
1830         if (!b_node->slot[split]) {
1831                 /*
1832                  * If the split is less than the max slot && the right side will
1833                  * still be sufficient, then increment the split on NULL.
1834                  */
1835                 if ((split < slot_count - 1) &&
1836                     (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1837                         split++;
1838                 else
1839                         split--;
1840         }
1841         return split;
1842 }
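
     /*
      * For illustration: if the suggested split lands on a NULL slot mid-node
      * and the right side would keep more than the minimum occupancy, the
      * split advances by one so the left node ends on data (two NULLs are
      * never adjacent); otherwise the split retreats by one.
      */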
1843 
1844 /*
1845  * mab_calc_split() - Calculate the split location and if there needs to be two
1846  * splits.
      * @mas: The maple state
1847  * @bn: The maple_big_node with the data
1848  * @mid_split: The second split, if required.  0 otherwise.
      * @min: The minimum of the range being stored.
1849  *
1850  * Return: The first split location.  The middle split is set in @mid_split.
1851  */
1852 static inline int mab_calc_split(struct ma_state *mas,
1853          struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1854 {
1855         unsigned char b_end = bn->b_end;
1856         int split = b_end / 2; /* Assume equal split. */
1857         unsigned char slot_min, slot_count = mt_slots[bn->type];
1858 
1859         /*
1860          * To support gap tracking, all NULL entries are kept together and a node cannot
1861          * end on a NULL entry, with the exception of the left-most leaf.  The
1862          * limitation means that the split of a node must be checked for this condition
1863          * and be able to put more data in one direction or the other.
1864          */
1865         if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1866                 *mid_split = 0;
1867                 split = b_end - mt_min_slots[bn->type];
1868 
1869                 if (!ma_is_leaf(bn->type))
1870                         return split;
1871 
1872                 mas->mas_flags |= MA_STATE_REBALANCE;
1873                 if (!bn->slot[split])
1874                         split--;
1875                 return split;
1876         }
1877 
1878         /*
1879          * Although extremely rare, it is possible to enter what is known as the 3-way
1880          * split scenario.  The 3-way split comes about by means of a store of a range
1881          * that overwrites the end and beginning of two full nodes.  The result is a set
1882          * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
1883          * also be located in different parent nodes which are also full.  This can
1884          * carry upwards all the way to the root in the worst case.
1885          */
1886         if (unlikely(mab_middle_node(bn, split, slot_count))) {
1887                 split = b_end / 3;
1888                 *mid_split = split * 2;
1889         } else {
1890                 slot_min = mt_min_slots[bn->type];
1891 
1892                 *mid_split = 0;
1893                 /*
1894                  * Avoid having a range less than the slot count unless it
1895                  * causes one node to be deficient.
1896          * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
1897                  */
1898                 while ((split < slot_count - 1) &&
1899                        ((bn->pivot[split] - min) < slot_count - 1) &&
1900                        (b_end - split > slot_min))
1901                         split++;
1902         }
1903 
1904         /* Avoid ending a node on a NULL entry */
1905         split = mab_no_null_split(bn, split, slot_count);
1906 
1907         if (unlikely(*mid_split))
1908                 *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1909 
1910         return split;
1911 }
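
     /*
      * Worked example (hypothetical sizes): with 16 slots per node and
      * b_end = 31, two nodes cannot hold the data when slot 15 is NULL
      * (31 >= 2 * 16 - 1), so three nodes are used: split = 31 / 3 = 10 and
      * *mid_split = 20, roughly a third of the entries per node.
      */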
1912 
1913 /*
1914  * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1915  * and set @b_node->b_end to the next free slot.
1916  * @mas: The maple state
1917  * @mas_start: The starting slot to copy
1918  * @mas_end: The end slot to copy (inclusively)
1919  * @b_node: The maple_big_node to place the data
1920  * @mab_start: The starting location in maple_big_node to store the data.
1921  */
1922 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1923                         unsigned char mas_end, struct maple_big_node *b_node,
1924                         unsigned char mab_start)
1925 {
1926         enum maple_type mt;
1927         struct maple_node *node;
1928         void __rcu **slots;
1929         unsigned long *pivots, *gaps;
1930         int i = mas_start, j = mab_start;
1931         unsigned char piv_end;
1932 
1933         node = mas_mn(mas);
1934         mt = mte_node_type(mas->node);
1935         pivots = ma_pivots(node, mt);
1936         if (!i) {
1937                 b_node->pivot[j] = pivots[i++];
1938                 if (unlikely(i > mas_end))
1939                         goto complete;
1940                 j++;
1941         }
1942 
1943         piv_end = min(mas_end, mt_pivots[mt]);
1944         for (; i < piv_end; i++, j++) {
1945                 b_node->pivot[j] = pivots[i];
1946                 if (unlikely(!b_node->pivot[j]))
1947                         break;
1948 
1949                 if (unlikely(mas->max == b_node->pivot[j]))
1950                         goto complete;
1951         }
1952 
1953         if (likely(i <= mas_end))
1954                 b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1955 
1956 complete:
1957         b_node->b_end = ++j;
1958         j -= mab_start;
1959         slots = ma_slots(node, mt);
1960         memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1961         if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1962                 gaps = ma_gaps(node, mt);
1963                 memcpy(b_node->gap + mab_start, gaps + mas_start,
1964                        sizeof(unsigned long) * j);
1965         }
1966 }
1967 
1968 /*
1969  * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1970  * @node: The maple node
1971  * @mt: The maple type
1972  * @end: The node end
1973  */
1974 static inline void mas_leaf_set_meta(struct maple_node *node,
1975                 enum maple_type mt, unsigned char end)
1976 {
1977         if (end < mt_slots[mt] - 1)
1978                 ma_set_meta(node, mt, 0, end);
1979 }
1980 
1981 /*
1982  * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
1983  * @b_node: the maple_big_node that has the data
1984  * @mab_start: the start location in @b_node.
1985  * @mab_end: The end location in @b_node (inclusively)
1986  * @mas: The maple state with the maple encoded node.
1987  */
1988 static inline void mab_mas_cp(struct maple_big_node *b_node,
1989                               unsigned char mab_start, unsigned char mab_end,
1990                               struct ma_state *mas, bool new_max)
1991 {
1992         int i, j = 0;
1993         enum maple_type mt = mte_node_type(mas->node);
1994         struct maple_node *node = mte_to_node(mas->node);
1995         void __rcu **slots = ma_slots(node, mt);
1996         unsigned long *pivots = ma_pivots(node, mt);
1997         unsigned long *gaps = NULL;
1998         unsigned char end;
1999 
2000         if (mab_end - mab_start > mt_pivots[mt])
2001                 mab_end--;
2002 
2003         if (!pivots[mt_pivots[mt] - 1])
2004                 slots[mt_pivots[mt]] = NULL;
2005 
2006         i = mab_start;
2007         do {
2008                 pivots[j++] = b_node->pivot[i++];
2009         } while (i <= mab_end && likely(b_node->pivot[i]));
2010 
2011         memcpy(slots, b_node->slot + mab_start,
2012                sizeof(void *) * (i - mab_start));
2013 
2014         if (new_max)
2015                 mas->max = b_node->pivot[i - 1];
2016 
2017         end = j - 1;
2018         if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2019                 unsigned long max_gap = 0;
2020                 unsigned char offset = 0;
2021 
2022                 gaps = ma_gaps(node, mt);
2023                 do {
2024                         gaps[--j] = b_node->gap[--i];
2025                         if (gaps[j] > max_gap) {
2026                                 offset = j;
2027                                 max_gap = gaps[j];
2028                         }
2029                 } while (j);
2030 
2031                 ma_set_meta(node, mt, offset, end);
2032         } else {
2033                 mas_leaf_set_meta(node, mt, end);
2034         }
2035 }
2036 
2037 /*
2038  * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2039  * @mas: The maple state
2040  * @end: The maple node end
2041  * @mt: The maple node type
2042  */
2043 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2044                                       enum maple_type mt)
2045 {
2046         if (!(mas->mas_flags & MA_STATE_BULK))
2047                 return;
2048 
2049         if (mte_is_root(mas->node))
2050                 return;
2051 
2052         if (end > mt_min_slots[mt]) {
2053                 mas->mas_flags &= ~MA_STATE_REBALANCE;
2054                 return;
2055         }
2056 }
2057 
2058 /*
2059  * mas_store_b_node() - Store an @entry into the b_node while also copying the
2060  * data from a maple encoded node.
2061  * @wr_mas: the maple write state
2062  * @b_node: the maple_big_node to fill with data
2063  * @offset_end: the offset to end copying
2064  *
2065  * Sets @b_node->b_end to the actual end of the data stored.
2066  */
2067 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2068                 struct maple_big_node *b_node, unsigned char offset_end)
2069 {
2070         unsigned char slot;
2071         unsigned char b_end;
2072         /* Possible underflow of piv will wrap back to 0 before use. */
2073         unsigned long piv;
2074         struct ma_state *mas = wr_mas->mas;
2075 
2076         b_node->type = wr_mas->type;
2077         b_end = 0;
2078         slot = mas->offset;
2079         if (slot) {
2080                 /* Copy start data up to insert. */
2081                 mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2082                 b_end = b_node->b_end;
2083                 piv = b_node->pivot[b_end - 1];
2084         } else
2085                 piv = mas->min - 1;
2086 
2087         if (piv + 1 < mas->index) {
2088                 /* Handle range starting after old range */
2089                 b_node->slot[b_end] = wr_mas->content;
2090                 if (!wr_mas->content)
2091                         b_node->gap[b_end] = mas->index - 1 - piv;
2092                 b_node->pivot[b_end++] = mas->index - 1;
2093         }
2094 
2095         /* Store the new entry. */
2096         mas->offset = b_end;
2097         b_node->slot[b_end] = wr_mas->entry;
2098         b_node->pivot[b_end] = mas->last;
2099 
2100         /* Appended. */
2101         if (mas->last >= mas->max)
2102                 goto b_end;
2103 
2104         /* Handle new range ending before old range ends */
2105         piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2106         if (piv > mas->last) {
2107                 if (piv == ULONG_MAX)
2108                         mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2109 
2110                 if (offset_end != slot)
2111                         wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2112                                                           offset_end);
2113 
2114                 b_node->slot[++b_end] = wr_mas->content;
2115                 if (!wr_mas->content)
2116                         b_node->gap[b_end] = piv - mas->last + 1;
2117                 b_node->pivot[b_end] = piv;
2118         }
2119 
2120         slot = offset_end + 1;
2121         if (slot > mas->end)
2122                 goto b_end;
2123 
2124         /* Copy end data to the end of the node. */
2125         mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
2126         b_node->b_end--;
2127         return;
2128 
2129 b_end:
2130         b_node->b_end = b_end;
2131 }
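
     /*
      * Illustrative example (hypothetical values): storing X over [20, 35] in
      * a leaf holding [1, 10] => A, [11, 30] => B, [31, 50] => C copies A,
      * trims B to [11, 19], stores [20, 35] => X, and keeps the tail of the
      * old C as [36, 50] => C.
      */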
2132 
2133 /*
2134  * mas_prev_sibling() - Find the previous node with the same parent.
2135  * @mas: the maple state
2136  *
2137  * Return: True if there is a previous sibling, false otherwise.
2138  */
2139 static inline bool mas_prev_sibling(struct ma_state *mas)
2140 {
2141         unsigned int p_slot = mte_parent_slot(mas->node);
2142 
2143         if (mte_is_root(mas->node))
2144                 return false;
2145 
2146         if (!p_slot)
2147                 return false;
2148 
2149         mas_ascend(mas);
2150         mas->offset = p_slot - 1;
2151         mas_descend(mas);
2152         return true;
2153 }
2154 
2155 /*
2156  * mas_next_sibling() - Find the next node with the same parent.
2157  * @mas: the maple state
2158  *
2159  * Return: true if there is a next sibling, false otherwise.
2160  */
2161 static inline bool mas_next_sibling(struct ma_state *mas)
2162 {
2163         MA_STATE(parent, mas->tree, mas->index, mas->last);
2164 
2165         if (mte_is_root(mas->node))
2166                 return false;
2167 
2168         parent = *mas;
2169         mas_ascend(&parent);
2170         parent.offset = mte_parent_slot(mas->node) + 1;
2171         if (parent.offset > mas_data_end(&parent))
2172                 return false;
2173 
2174         *mas = parent;
2175         mas_descend(mas);
2176         return true;
2177 }
2178 
2179 /*
2180  * mas_node_or_none() - Set the node and status of the maple state.
2181  * @mas: The maple state
2182  * @enode: The encoded maple node, or NULL
2183  * Set the node to @enode and the status to ma_active or ma_none.
2184  */
2185 static inline void mas_node_or_none(struct ma_state *mas,
2186                 struct maple_enode *enode)
2187 {
2188         if (enode) {
2189                 mas->node = enode;
2190                 mas->status = ma_active;
2191         } else {
2192                 mas->node = NULL;
2193                 mas->status = ma_none;
2194         }
2195 }
2196 
2197 /*
2198  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2199  *                      If @mas->index cannot be found within the containing
2200  *                      node, we traverse to the last entry in the node.
2201  * @wr_mas: The maple write state
2202  *
2203  * Uses mas_slot_locked() and does not need to worry about dead nodes.
2204  */
2205 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2206 {
2207         struct ma_state *mas = wr_mas->mas;
2208         unsigned char count, offset;
2209 
2210         if (unlikely(ma_is_dense(wr_mas->type))) {
2211                 wr_mas->r_max = wr_mas->r_min = mas->index;
2212                 mas->offset = mas->index = mas->min;
2213                 return;
2214         }
2215 
2216         wr_mas->node = mas_mn(wr_mas->mas);
2217         wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2218         count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
2219                                        wr_mas->pivots, mas->max);
2220         offset = mas->offset;
2221 
2222         while (offset < count && mas->index > wr_mas->pivots[offset])
2223                 offset++;
2224 
2225         wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2226         wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2227         wr_mas->offset_end = mas->offset = offset;
2228 }
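
     /*
      * For illustration: walking index 25 in a node with pivots {10, 30, 50}
      * stops at offset 1 and records r_min = 11, r_max = 30: the bounds of
      * the existing range that the pending write will modify.
      */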
2229 
2230 /*
2231  * mast_rebalance_next() - Rebalance against the next node
2232  * @mast: The maple subtree state
2234  */
2235 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2236 {
2237         unsigned char b_end = mast->bn->b_end;
2238 
2239         mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2240                    mast->bn, b_end);
2241         mast->orig_r->last = mast->orig_r->max;
2242 }
2243 
2244 /*
2245  * mast_rebalance_prev() - Rebalance against the previous node
2246  * @mast: The maple subtree state
2248  */
2249 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2250 {
2251         unsigned char end = mas_data_end(mast->orig_l) + 1;
2252         unsigned char b_end = mast->bn->b_end;
2253 
2254         mab_shift_right(mast->bn, end);
2255         mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2256         mast->l->min = mast->orig_l->min;
2257         mast->orig_l->index = mast->orig_l->min;
2258         mast->bn->b_end = end + b_end;
2259         mast->l->offset += end;
2260 }
2261 
2262 /*
2263  * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
2264  * the node to the right.  Check the nodes to the right, then the left, at
2265  * each level upwards until the root is reached.
2266  * Data is copied into the @mast->bn.
2267  * @mast: The maple_subtree_state.
2268  */
2269 static inline
2270 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2271 {
2272         struct ma_state r_tmp = *mast->orig_r;
2273         struct ma_state l_tmp = *mast->orig_l;
2274         unsigned char depth = 0;
2275 
2276         do {
2277                 mas_ascend(mast->orig_r);
2278                 mas_ascend(mast->orig_l);
2279                 depth++;
2280                 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2281                         mast->orig_r->offset++;
2282                         do {
2283                                 mas_descend(mast->orig_r);
2284                                 mast->orig_r->offset = 0;
2285                         } while (--depth);
2286 
2287                         mast_rebalance_next(mast);
2288                         *mast->orig_l = l_tmp;
2289                         return true;
2290                 } else if (mast->orig_l->offset != 0) {
2291                         mast->orig_l->offset--;
2292                         do {
2293                                 mas_descend(mast->orig_l);
2294                                 mast->orig_l->offset =
2295                                         mas_data_end(mast->orig_l);
2296                         } while (--depth);
2297 
2298                         mast_rebalance_prev(mast);
2299                         *mast->orig_r = r_tmp;
2300                         return true;
2301                 }
2302         } while (!mte_is_root(mast->orig_r->node));
2303 
2304         *mast->orig_r = r_tmp;
2305         *mast->orig_l = l_tmp;
2306         return false;
2307 }
2308 
2309 /*
2310  * mast_ascend() - Ascend the original left and right maple states.
2311  * @mast: the maple subtree state.
2312  *
2313  * Ascend the original left and right sides.  Set the offsets to point to the
2314  * data already in the new tree (@mast->l and @mast->r).
2315  */
2316 static inline void mast_ascend(struct maple_subtree_state *mast)
2317 {
2318         MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2319         mas_ascend(mast->orig_l);
2320         mas_ascend(mast->orig_r);
2321 
2322         mast->orig_r->offset = 0;
2323         mast->orig_r->index = mast->r->max;
2324         /* last should be larger than or equal to index */
2325         if (mast->orig_r->last < mast->orig_r->index)
2326                 mast->orig_r->last = mast->orig_r->index;
2327 
2328         wr_mas.type = mte_node_type(mast->orig_r->node);
2329         mas_wr_node_walk(&wr_mas);
2330         /* Set up the left side of things */
2331         mast->orig_l->offset = 0;
2332         mast->orig_l->index = mast->l->min;
2333         wr_mas.mas = mast->orig_l;
2334         wr_mas.type = mte_node_type(mast->orig_l->node);
2335         mas_wr_node_walk(&wr_mas);
2336 
2337         mast->bn->type = wr_mas.type;
2338 }
2339 
2340 /*
2341  * mas_new_ma_node() - Create and return a new maple node.  Helper function.
2342  * @mas: the maple state with the allocations.
2343  * @b_node: the maple_big_node with the type encoding.
2344  *
2345  * Use the node type from the maple_big_node to allocate a new node from the
2346  * ma_state.  This function exists mainly for code readability.
2347  *
2348  * Return: A new maple encoded node
2349  */
2350 static inline struct maple_enode
2351 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2352 {
2353         return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2354 }
2355 
2356 /*
2357  * mas_mab_to_node() - Set up right and middle nodes
2358  *
2359  * @mas: the maple state that contains the allocations.
2360  * @b_node: the node which contains the data.
2361  * @left: The pointer which will have the left node
2362  * @right: The pointer which may have the right node
2363  * @middle: the pointer which may have the middle node (rare)
2364  * @mid_split: the split location for the middle node
      * @min: the minimum of the range being stored
2365  *
2366  * Return: the split of left.
2367  */
2368 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2369         struct maple_big_node *b_node, struct maple_enode **left,
2370         struct maple_enode **right, struct maple_enode **middle,
2371         unsigned char *mid_split, unsigned long min)
2372 {
2373         unsigned char split = 0;
2374         unsigned char slot_count = mt_slots[b_node->type];
2375 
2376         *left = mas_new_ma_node(mas, b_node);
2377         *right = NULL;
2378         *middle = NULL;
2379         *mid_split = 0;
2380 
2381         if (b_node->b_end < slot_count) {
2382                 split = b_node->b_end;
2383         } else {
2384                 split = mab_calc_split(mas, b_node, mid_split, min);
2385                 *right = mas_new_ma_node(mas, b_node);
2386         }
2387 
2388         if (*mid_split)
2389                 *middle = mas_new_ma_node(mas, b_node);
2390 
2391         return split;
2393 }
2394 
2395 /*
2396  * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2397  * pointer.
2398  * @b_node: the big node to add the entry to
2399  * @mas: the maple state to get the pivot (mas->max)
2400  * @entry: the entry to add; if NULL nothing happens.
2401  */
2402 static inline void mab_set_b_end(struct maple_big_node *b_node,
2403                                  struct ma_state *mas,
2404                                  void *entry)
2405 {
2406         if (!entry)
2407                 return;
2408 
2409         b_node->slot[b_node->b_end] = entry;
2410         if (mt_is_alloc(mas->tree))
2411                 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2412         b_node->pivot[b_node->b_end++] = mas->max;
2413 }
2414 
2415 /*
2416  * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
2417  * of @mas->node to either @left or @right, depending on @slot and @split
2418  *
2419  * @mas: the maple state with the node that needs a parent
2420  * @left: possible parent 1
2421  * @right: possible parent 2
2422  * @slot: the slot @mas->node was placed in
2423  * @split: the split location between @left and @right
2424  */
2425 static inline void mas_set_split_parent(struct ma_state *mas,
2426                                         struct maple_enode *left,
2427                                         struct maple_enode *right,
2428                                         unsigned char *slot, unsigned char split)
2429 {
2430         if (mas_is_none(mas))
2431                 return;
2432 
2433         if ((*slot) <= split)
2434                 mas_set_parent(mas, mas->node, left, *slot);
2435         else if (right)
2436                 mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2437 
2438         (*slot)++;
2439 }
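
     /*
      * For illustration: with split = 4, a child at slot 3 is parented to
      * @left at offset 3, while a child at slot 6 is parented to @right at
      * offset 6 - 4 - 1 = 1.
      */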
2440 
2441 /*
2442  * mte_mid_split_check() - Check if the next node passes the mid-split
2443  * @l: Pointer to the left encoded maple node.
2444  * @r: Pointer to the current right encoded maple node.
2445  * @right: The encoded maple node to the right of the mid-split.
2446  * @slot: The offset
2447  * @split: The split location.
2448  * @mid_split: The middle split.
2449  */
2450 static inline void mte_mid_split_check(struct maple_enode **l,
2451                                        struct maple_enode **r,
2452                                        struct maple_enode *right,
2453                                        unsigned char slot,
2454                                        unsigned char *split,
2455                                        unsigned char mid_split)
2456 {
2457         if (*r == right)
2458                 return;
2459 
2460         if (slot < mid_split)
2461                 return;
2462 
2463         *l = *r;
2464         *r = right;
2465         *split = mid_split;
2466 }
2467 
2468 /*
2469  * mast_set_split_parents() - Helper function to set three nodes' parents.  Slot
2470  * is taken from @mast->l.
2471  * @mast: the maple subtree state
2472  * @left: the left node
      * @middle: the middle node
2473  * @right: the right node
2474  * @split: the split location.
      * @mid_split: the mid-split location.
2475  */
2476 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2477                                           struct maple_enode *left,
2478                                           struct maple_enode *middle,
2479                                           struct maple_enode *right,
2480                                           unsigned char split,
2481                                           unsigned char mid_split)
2482 {
2483         unsigned char slot;
2484         struct maple_enode *l = left;
2485         struct maple_enode *r = right;
2486 
2487         if (mas_is_none(mast->l))
2488                 return;
2489 
2490         if (middle)
2491                 r = middle;
2492 
2493         slot = mast->l->offset;
2494 
2495         mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2496         mas_set_split_parent(mast->l, l, r, &slot, split);
2497 
2498         mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2499         mas_set_split_parent(mast->m, l, r, &slot, split);
2500 
2501         mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2502         mas_set_split_parent(mast->r, l, r, &slot, split);
2503 }
2504 
2505 /*
2506  * mas_topiary_node() - Dispose of a single node
2507  * @mas: The maple state for pushing nodes
2508  * @tmp_mas: The maple state holding the node to dispose of
2509  * @in_rcu: If the tree is in rcu mode
2510  *
2511  * The node will either be RCU freed or pushed back on the maple state.
2512  */
2513 static inline void mas_topiary_node(struct ma_state *mas,
2514                 struct ma_state *tmp_mas, bool in_rcu)
2515 {
2516         struct maple_node *tmp;
2517         struct maple_enode *enode;
2518 
2519         if (mas_is_none(tmp_mas))
2520                 return;
2521 
2522         enode = tmp_mas->node;
2523         tmp = mte_to_node(enode);
2524         mte_set_node_dead(enode);
2525         if (in_rcu)
2526                 ma_free_rcu(tmp);
2527         else
2528                 mas_push_node(mas, tmp);
2529 }
2530 
2531 /*
2532  * mas_topiary_replace() - Replace the data with new data, then repair the
2533  * parent links within the new tree.  Iterate over the dead sub-tree and collect
2534  * the dead subtrees and topiary the nodes that are no longer of use.
2535  *
2536  * The new tree will have up to three children with the correct parent.  Keep
2537  * track of the new entries as they need to be followed to find the next level
2538  * of new entries.
2539  *
2540  * The old tree will have up to three children with the old parent.  Keep track
2541  * of the old entries as they may have more nodes below replaced.  Nodes within
2542  * [index, last] are dead subtrees, others need to be freed and followed.
2543  *
2544  * @mas: The maple state pointing at the new data
2545  * @old_enode: The maple encoded node being replaced
2546  *
2547  */
2548 static inline void mas_topiary_replace(struct ma_state *mas,
2549                 struct maple_enode *old_enode)
2550 {
2551         struct ma_state tmp[3], tmp_next[3];
2552         MA_TOPIARY(subtrees, mas->tree);
2553         bool in_rcu;
2554         int i, n;
2555 
2556         /* Place data in tree & then mark node as old */
2557         mas_put_in_tree(mas, old_enode);
2558 
2559         /* Update the parent pointers in the tree */
2560         tmp[0] = *mas;
2561         tmp[0].offset = 0;
2562         tmp[1].status = ma_none;
2563         tmp[2].status = ma_none;
2564         while (!mte_is_leaf(tmp[0].node)) {
2565                 n = 0;
2566                 for (i = 0; i < 3; i++) {
2567                         if (mas_is_none(&tmp[i]))
2568                                 continue;
2569 
2570                         while (n < 3) {
2571                                 if (!mas_find_child(&tmp[i], &tmp_next[n]))
2572                                         break;
2573                                 n++;
2574                         }
2575 
2576                         mas_adopt_children(&tmp[i], tmp[i].node);
2577                 }
2578 
2579                 if (MAS_WARN_ON(mas, n == 0))
2580                         break;
2581 
2582                 while (n < 3)
2583                         tmp_next[n++].status = ma_none;
2584 
2585                 for (i = 0; i < 3; i++)
2586                         tmp[i] = tmp_next[i];
2587         }
2588 
2589         /* Collect the old nodes that need to be discarded */
2590         if (mte_is_leaf(old_enode))
2591                 return mas_free(mas, old_enode);
2592 
2593         tmp[0] = *mas;
2594         tmp[0].offset = 0;
2595         tmp[0].node = old_enode;
2596         tmp[1].status = ma_none;
2597         tmp[2].status = ma_none;
2598         in_rcu = mt_in_rcu(mas->tree);
2599         do {
2600                 n = 0;
2601                 for (i = 0; i < 3; i++) {
2602                         if (mas_is_none(&tmp[i]))
2603                                 continue;
2604 
2605                         while (n < 3) {
2606                                 if (!mas_find_child(&tmp[i], &tmp_next[n]))
2607                                         break;
2608 
2609                                 if ((tmp_next[n].min >= tmp_next->index) &&
2610                                     (tmp_next[n].max <= tmp_next->last)) {
2611                                         mat_add(&subtrees, tmp_next[n].node);
2612                                         tmp_next[n].status = ma_none;
2613                                 } else {
2614                                         n++;
2615                                 }
2616                         }
2617                 }
2618 
2619                 if (MAS_WARN_ON(mas, n == 0))
2620                         break;
2621 
2622                 while (n < 3)
2623                         tmp_next[n++].status = ma_none;
2624 
2625                 for (i = 0; i < 3; i++) {
2626                         mas_topiary_node(mas, &tmp[i], in_rcu);
2627                         tmp[i] = tmp_next[i];
2628                 }
2629         } while (!mte_is_leaf(tmp[0].node));
2630 
2631         for (i = 0; i < 3; i++)
2632                 mas_topiary_node(mas, &tmp[i], in_rcu);
2633 
2634         mas_mat_destroy(mas, &subtrees);
2635 }
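
     /*
      * Note the symmetry above: the first walk descends the new subtree,
      * up to three children per level, repairing parent pointers; the second
      * walk descends the old subtree collecting nodes.  Old nodes entirely
      * within [index, last] root dead subtrees and are queued on the topiary
      * list, while the remainder are freed or pushed back individually as
      * the walk passes them.
      */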
2636 
2637 /*
2638  * mas_wmb_replace() - Write memory barrier and replace
2639  * @mas: The maple state
2640  * @old_enode: The old maple encoded node that is being replaced.
2641  *
2642  * Updates gap as necessary.
2643  */
2644 static inline void mas_wmb_replace(struct ma_state *mas,
2645                 struct maple_enode *old_enode)
2646 {
2647         /* Insert the new data in the tree */
2648         mas_topiary_replace(mas, old_enode);
2649 
2650         if (mte_is_leaf(mas->node))
2651                 return;
2652 
2653         mas_update_gap(mas);
2654 }
2655 
2656 /*
2657  * mast_cp_to_nodes() - Copy data out to nodes.
2658  * @mast: The maple subtree state
2659  * @left: The left encoded maple node
2660  * @middle: The middle encoded maple node
2661  * @right: The right encoded maple node
2662  * @split: The location to split between left and (middle ? middle : right)
2663  * @mid_split: The location to split between middle and right.
2664  */
2665 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2666         struct maple_enode *left, struct maple_enode *middle,
2667         struct maple_enode *right, unsigned char split, unsigned char mid_split)
2668 {
2669         bool new_lmax = true;
2670 
2671         mas_node_or_none(mast->l, left);
2672         mas_node_or_none(mast->m, middle);
2673         mas_node_or_none(mast->r, right);
2674 
2675         mast->l->min = mast->orig_l->min;
2676         if (split == mast->bn->b_end) {
2677                 mast->l->max = mast->orig_r->max;
2678                 new_lmax = false;
2679         }
2680 
2681         mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2682 
2683         if (middle) {
2684                 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2685                 mast->m->min = mast->bn->pivot[split] + 1;
2686                 split = mid_split;
2687         }
2688 
2689         mast->r->max = mast->orig_r->max;
2690         if (right) {
2691                 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2692                 mast->r->min = mast->bn->pivot[split] + 1;
2693         }
2694 }
2695 
2696 /*
2697  * mast_combine_cp_left() - Copy in the original left side of the tree into the
2698  * combined data set in the maple subtree state big node.
2699  * @mast: The maple subtree state
2700  */
2701 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2702 {
2703         unsigned char l_slot = mast->orig_l->offset;
2704 
2705         if (!l_slot)
2706                 return;
2707 
2708         mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2709 }
2710 
2711 /*
2712  * mast_combine_cp_right() - Copy in the original right side of the tree into the
2713  * combined data set in the maple subtree state big node.
2714  * @mast: The maple subtree state
2715  */
2716 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2717 {
2718         if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2719                 return;
2720 
2721         mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2722                    mt_slot_count(mast->orig_r->node), mast->bn,
2723                    mast->bn->b_end);
2724         mast->orig_r->last = mast->orig_r->max;
2725 }
2726 
2727 /*
2728  * mast_sufficient() - Check if the maple subtree state has enough data in the big
2729  * node to create at least one sufficient node
2730  * @mast: the maple subtree state
2731  */
2732 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2733 {
2734         if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2735                 return true;
2736 
2737         return false;
2738 }
2739 
2740 /*
2741  * mast_overflow() - Check if there is too much data in the subtree state for a
2742  * single node.
2743  * @mast: The maple subtree state
2744  */
2745 static inline bool mast_overflow(struct maple_subtree_state *mast)
2746 {
2747         if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2748                 return true;
2749 
2750         return false;
2751 }
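
     /*
      * For illustration (hypothetical sizes): with 16 slots per node and a
      * minimum of 8, a big node holding 9 or more entries is sufficient and
      * one holding 16 or more must be split; only 8 or fewer entries forces
      * borrowing from a neighbour via mast_spanning_rebalance().
      */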
2752 
2753 static inline void *mtree_range_walk(struct ma_state *mas)
2754 {
2755         unsigned long *pivots;
2756         unsigned char offset;
2757         struct maple_node *node;
2758         struct maple_enode *next, *last;
2759         enum maple_type type;
2760         void __rcu **slots;
2761         unsigned char end;
2762         unsigned long max, min;
2763         unsigned long prev_max, prev_min;
2764 
2765         next = mas->node;
2766         min = mas->min;
2767         max = mas->max;
2768         do {
2769                 last = next;
2770                 node = mte_to_node(next);
2771                 type = mte_node_type(next);
2772                 pivots = ma_pivots(node, type);
2773                 end = ma_data_end(node, type, pivots, max);
2774                 prev_min = min;
2775                 prev_max = max;
2776                 if (pivots[0] >= mas->index) {
2777                         offset = 0;
2778                         max = pivots[0];
2779                         goto next;
2780                 }
2781 
2782                 offset = 1;
2783                 while (offset < end) {
2784                         if (pivots[offset] >= mas->index) {
2785                                 max = pivots[offset];
2786                                 break;
2787                         }
2788                         offset++;
2789                 }
2790 
2791                 min = pivots[offset - 1] + 1;
2792 next:
2793                 slots = ma_slots(node, type);
2794                 next = mt_slot(mas->tree, slots, offset);
2795                 if (unlikely(ma_dead_node(node)))
2796                         goto dead_node;
2797         } while (!ma_is_leaf(type));
2798 
2799         mas->end = end;
2800         mas->offset = offset;
2801         mas->index = min;
2802         mas->last = max;
2803         mas->min = prev_min;
2804         mas->max = prev_max;
2805         mas->node = last;
2806         return (void *)next;
2807 
2808 dead_node:
2809         mas_reset(mas);
2810         return NULL;
2811 }
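
     /*
      * Illustrative example (hypothetical tree): searching for index 25 in a
      * node with pivots {10, 30, 50} descends through slot 1 and narrows the
      * range to min = 11, max = 30.  At the leaf, mas->index and mas->last
      * are set to that final [min, max], so the state describes the whole
      * range holding the returned entry.
      */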
2812 
2813 /*
2814  * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2815  * @mas: The starting maple state
2816  * @mast: The maple_subtree_state, keeps track of 4 maple states.
2817  * @count: The estimated count of iterations needed.
2818  *
2819  * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2820  * is hit.  First @b_node is split into two entries which are inserted into the
2821  * next iteration of the loop.  @b_node is returned populated with the final
2822  * iteration. @mas is used to obtain allocations.  orig_l_mas keeps track of the
2823  * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2824  * to account for what has been copied into the new sub-tree.  The update of
2825  * orig_l_mas->last is used in mas_consume to find the slots that will need to
2826  * be either freed or destroyed.  orig_l_mas->depth keeps track of the height of
2827  * the new sub-tree in case the sub-tree becomes the full tree.
2828  *
2829  * Return: the number of elements in b_node during the last loop.
2830  */
2831 static int mas_spanning_rebalance(struct ma_state *mas,
2832                 struct maple_subtree_state *mast, unsigned char count)
2833 {
2834         unsigned char split, mid_split;
2835         unsigned char slot = 0;
2836         struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2837         struct maple_enode *old_enode;
2838 
2839         MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2840         MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2841         MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2842 
2843         /*
2844          * The tree needs to be rebalanced and leaves need to be kept at the same level.
2845          * Rebalancing is done by use of the ``struct maple_topiary``.
2846          */
2847         mast->l = &l_mas;
2848         mast->m = &m_mas;
2849         mast->r = &r_mas;
2850         l_mas.status = r_mas.status = m_mas.status = ma_none;
2851 
2852         /* If this is not the root and the data is insufficient, rebalance. */
2853         if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
2854             unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
2855                 mast_spanning_rebalance(mast);
2856 
2857         l_mas.depth = 0;
2858 
2859         /*
2860          * Each level of the tree is examined and balanced, pushing data to the left or
2861          * right, or rebalancing against left or right nodes is employed to avoid
2862          * rippling up the tree to limit the amount of churn.  Once a new sub-section of
2863          * the tree is created, there may be a mix of new and old nodes.  The old nodes
2864          * will have the incorrect parent pointers and currently be in two trees: the
2865          * original tree and the partially new tree.  To remedy the parent pointers in
2866          * the old tree, the new data is swapped into the active tree and a walk down
2867          * the tree is performed and the parent pointers are updated.
2868          * See mas_topiary_replace() for more information.
2869          */
2870         while (count--) {
2871                 mast->bn->b_end--;
2872                 mast->bn->type = mte_node_type(mast->orig_l->node);
2873                 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
2874                                         &mid_split, mast->orig_l->min);
2875                 mast_set_split_parents(mast, left, middle, right, split,
2876                                        mid_split);
2877                 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
2878 
2879                 /*
2880                  * Copy data from the next level in the tree into mast->bn for
2881                  * the next iteration
2882                  */
2883                 memset(mast->bn, 0, sizeof(struct maple_big_node));
2884                 mast->bn->type = mte_node_type(left);
2885                 l_mas.depth++;
2886 
2887                 /* Root already stored in l->node. */
2888                 if (mas_is_root_limits(mast->l))
2889                         goto new_root;
2890 
2891                 mast_ascend(mast);
2892                 mast_combine_cp_left(mast);
2893                 l_mas.offset = mast->bn->b_end;
2894                 mab_set_b_end(mast->bn, &l_mas, left);
2895                 mab_set_b_end(mast->bn, &m_mas, middle);
2896                 mab_set_b_end(mast->bn, &r_mas, right);
2897 
2898                 /* Copy anything necessary out of the right node. */
2899                 mast_combine_cp_right(mast);
2900                 mast->orig_l->last = mast->orig_l->max;
2901 
2902                 if (mast_sufficient(mast))
2903                         continue;
2904 
2905                 if (mast_overflow(mast))
2906                         continue;
2907 
2908                 /* May be a new root stored in mast->bn */
2909                 if (mas_is_root_limits(mast->orig_l))
2910                         break;
2911 
2912                 mast_spanning_rebalance(mast);
2913 
2914                 /* rebalancing from other nodes may require another loop. */
2915                 if (!count)
2916                         count++;
2917         }
2918 
2919         l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
2920                                 mte_node_type(mast->orig_l->node));
2921         l_mas.depth++;
2922         mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
2923         mas_set_parent(mas, left, l_mas.node, slot);
2924         if (middle)
2925                 mas_set_parent(mas, middle, l_mas.node, ++slot);
2926 
2927         if (right)
2928                 mas_set_parent(mas, right, l_mas.node, ++slot);
2929 
2930         if (mas_is_root_limits(mast->l)) {
2931 new_root:
2932                 mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
2933                 while (!mte_is_root(mast->orig_l->node))
2934                         mast_ascend(mast);
2935         } else {
2936                 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
2937         }
2938 
2939         old_enode = mast->orig_l->node;
2940         mas->depth = l_mas.depth;
2941         mas->node = l_mas.node;
2942         mas->min = l_mas.min;
2943         mas->max = l_mas.max;
2944         mas->offset = l_mas.offset;
2945         mas_wmb_replace(mas, old_enode);
2946         mtree_range_walk(mas);
2947         return mast->bn->b_end;
2948 }
2949 
2950 /*
2951  * mas_rebalance() - Rebalance a given node.
2952  * @mas: The maple state
2953  * @b_node: The big maple node.
2954  *
2955  * Rebalance two nodes into a single node or two new nodes that are sufficient.
2956  * Continue upwards until tree is sufficient.
2957  *
2958  * Return: the number of elements in b_node during the last loop.
2959  */
2960 static inline int mas_rebalance(struct ma_state *mas,
2961                                 struct maple_big_node *b_node)
2962 {
2963         char empty_count = mas_mt_height(mas);
2964         struct maple_subtree_state mast;
2965         unsigned char shift, b_end = ++b_node->b_end;
2966 
2967         MA_STATE(l_mas, mas->tree, mas->index, mas->last);
2968         MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2969 
2970         trace_ma_op(__func__, mas);
2971 
2972         /*
2973          * Rebalancing occurs if a node is insufficient.  Data is rebalanced
2974          * against the node to the right if it exists, otherwise the node to the
2975          * left of this node is rebalanced against this node.  If rebalancing
2976          * causes just one node to be produced instead of two, then the parent
2977          * is also examined and rebalanced if it is insufficient.  Every level
2978          * tries to combine the data in the same way.  If one node contains the
2979          * entire range of the tree, then that node is used as a new root node.
2980          */
2981         mas_node_count(mas, empty_count * 2 - 1);
2982         if (mas_is_err(mas))
2983                 return 0;
2984 
2985         mast.orig_l = &l_mas;
2986         mast.orig_r = &r_mas;
2987         mast.bn = b_node;
2988         mast.bn->type = mte_node_type(mas->node);
2989 
2990         l_mas = r_mas = *mas;
2991 
2992         if (mas_next_sibling(&r_mas)) {
2993                 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
2994                 r_mas.last = r_mas.index = r_mas.max;
2995         } else {
2996                 mas_prev_sibling(&l_mas);
2997                 shift = mas_data_end(&l_mas) + 1;
2998                 mab_shift_right(b_node, shift);
2999                 mas->offset += shift;
3000                 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3001                 b_node->b_end = shift + b_end;
3002                 l_mas.index = l_mas.last = l_mas.min;
3003         }
3004 
3005         return mas_spanning_rebalance(mas, &mast, empty_count);
3006 }
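
     /*
      * For illustration: when the insufficient node has a next sibling, that
      * sibling's entries are appended after the node's own data in the big
      * node; otherwise the previous sibling's entries are shifted in ahead
      * of it.  The combined data is then redistributed upwards by
      * mas_spanning_rebalance().
      */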
3007 
3008 /*
3009  * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3010  * state.
3011  * @mas: The maple state
3012  * @end: The end of the left-most node.
3013  *
3014  * During a mass-insert event (such as forking), it may be necessary to
3015  * rebalance the left-most node when it is not sufficient.
3016  */
3017 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3018 {
3019         enum maple_type mt = mte_node_type(mas->node);
3020         struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3021         struct maple_enode *eparent, *old_eparent;
3022         unsigned char offset, tmp, split = mt_slots[mt] / 2;
3023         void __rcu **l_slots, **slots;
3024         unsigned long *l_pivs, *pivs, gap;
3025         bool in_rcu = mt_in_rcu(mas->tree);
3026 
3027         MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3028 
3029         l_mas = *mas;
3030         mas_prev_sibling(&l_mas);
3031 
3032         /* set up node. */
3033         if (in_rcu) {
3034                 /* Allocate for both left and right as well as parent. */
3035                 mas_node_count(mas, 3);
3036                 if (mas_is_err(mas))
3037                         return;
3038 
3039                 newnode = mas_pop_node(mas);
3040         } else {
3041                 newnode = &reuse;
3042         }
3043 
3044         node = mas_mn(mas);
3045         newnode->parent = node->parent;
3046         slots = ma_slots(newnode, mt);
3047         pivs = ma_pivots(newnode, mt);
3048         left = mas_mn(&l_mas);
3049         l_slots = ma_slots(left, mt);
3050         l_pivs = ma_pivots(left, mt);
3051         if (!l_slots[split])
3052                 split++;
3053         tmp = mas_data_end(&l_mas) - split;
3054 
3055         memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3056         memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3057         pivs[tmp] = l_mas.max;
3058         memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3059         memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3060 
3061         l_mas.max = l_pivs[split];
3062         mas->min = l_mas.max + 1;
3063         old_eparent = mt_mk_node(mte_parent(l_mas.node),
3064                              mas_parent_type(&l_mas, l_mas.node));
3065         tmp += end;
3066         if (!in_rcu) {
3067                 unsigned char max_p = mt_pivots[mt];
3068                 unsigned char max_s = mt_slots[mt];
3069 
3070                 if (tmp < max_p)
3071                         memset(pivs + tmp, 0,
3072                                sizeof(unsigned long) * (max_p - tmp));
3073 
3074                 if (tmp < mt_slots[mt])
3075                         memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3076 
3077                 memcpy(node, newnode, sizeof(struct maple_node));
3078                 ma_set_meta(node, mt, 0, tmp - 1);
3079                 mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
3080                               l_pivs[split]);
3081 
3082                 /* Remove data from l_pivs. */
3083                 tmp = split + 1;
3084                 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3085                 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3086                 ma_set_meta(left, mt, 0, split);
3087                 eparent = old_eparent;
3088 
3089                 goto done;
3090         }
3091 
3092         /* RCU requires replacing l_mas, mas, and the parent. */
3093         mas->node = mt_mk_node(newnode, mt);
3094         ma_set_meta(newnode, mt, 0, tmp);
3095 
3096         new_left = mas_pop_node(mas);
3097         new_left->parent = left->parent;
3098         mt = mte_node_type(l_mas.node);
3099         slots = ma_slots(new_left, mt);
3100         pivs = ma_pivots(new_left, mt);
3101         memcpy(slots, l_slots, sizeof(void *) * split);
3102         memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3103         ma_set_meta(new_left, mt, 0, split);
3104         l_mas.node = mt_mk_node(new_left, mt);
3105 
3106         /* replace parent. */
3107         offset = mte_parent_slot(mas->node);
3108         mt = mas_parent_type(&l_mas, l_mas.node);
3109         parent = mas_pop_node(mas);
3110         slots = ma_slots(parent, mt);
3111         pivs = ma_pivots(parent, mt);
3112         memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
3113         rcu_assign_pointer(slots[offset], mas->node);
3114         rcu_assign_pointer(slots[offset - 1], l_mas.node);
3115         pivs[offset - 1] = l_mas.max;
3116         eparent = mt_mk_node(parent, mt);
3117 done:
3118         gap = mas_leaf_max_gap(mas);
3119         mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3120         gap = mas_leaf_max_gap(&l_mas);
3121         mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3122         mas_ascend(mas);
3123 
3124         if (in_rcu) {
3125                 mas_replace_node(mas, old_eparent);
3126                 mas_adopt_children(mas, mas->node);
3127         }
3128 
3129         mas_update_gap(mas);
3130 }
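
/*
 * Illustrative sketch: the bulk-insert pattern that can end with
 * mas_destroy_rebalance().  mas_expected_entries() preallocates nodes and
 * enables the bulk path; mas_destroy() then rebalances the left-most node if
 * it was left without enough data.  The helper itself is hypothetical and
 * only demonstrates the calling convention.
 */
static int __maybe_unused example_bulk_fill(struct maple_tree *mt,
					    void **entries, unsigned long nr)
{
	unsigned long i;
	int ret;

	MA_STATE(mas, mt, 0, 0);

	mas_lock(&mas);
	ret = mas_expected_entries(&mas, nr);	/* preallocate for bulk */
	if (!ret) {
		for (i = 0; i < nr; i++) {
			/* Bulk stores must arrive in ascending order. */
			mas_set_range(&mas, i * 10, i * 10 + 9);
			mas_store(&mas, entries[i]);
		}
	}
	mas_destroy(&mas);	/* may call mas_destroy_rebalance() */
	mas_unlock(&mas);
	return ret;
}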
3131 
3132 /*
3133  * mas_split_final_node() - Split the final node in a subtree operation.
3134  * @mast: the maple subtree state
3135  * @mas: The maple state
3136  * @height: The height of the tree in case it's a new root.
3137  */
3138 static inline void mas_split_final_node(struct maple_subtree_state *mast,
3139                                         struct ma_state *mas, int height)
3140 {
3141         struct maple_enode *ancestor;
3142 
3143         if (mte_is_root(mas->node)) {
3144                 if (mt_is_alloc(mas->tree))
3145                         mast->bn->type = maple_arange_64;
3146                 else
3147                         mast->bn->type = maple_range_64;
3148                 mas->depth = height;
3149         }
3150         /*
3151          * Only a single node is used here; it could be the root.
3152          * The big node data should just fit in a single node.
3153          */
3154         ancestor = mas_new_ma_node(mas, mast->bn);
3155         mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3156         mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3157         mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3158 
3159         mast->l->node = ancestor;
3160         mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3161         mas->offset = mast->bn->b_end - 1;
3162 }
3163 
3164 /*
3165  * mast_fill_bnode() - Copy data into the big node in the subtree state
3166  * @mast: The maple subtree state
3167  * @mas: the maple state
3168  * @skip: The number of entries to skip when inserting the new nodes.
3169  */
3170 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3171                                          struct ma_state *mas,
3172                                          unsigned char skip)
3173 {
3174         bool cp = true;
3175         unsigned char split;
3176 
3177         memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3178         memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3179         memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3180         mast->bn->b_end = 0;
3181 
3182         if (mte_is_root(mas->node)) {
3183                 cp = false;
3184         } else {
3185                 mas_ascend(mas);
3186                 mas->offset = mte_parent_slot(mas->node);
3187         }
3188 
3189         if (cp && mast->l->offset)
3190                 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3191 
3192         split = mast->bn->b_end;
3193         mab_set_b_end(mast->bn, mast->l, mast->l->node);
3194         mast->r->offset = mast->bn->b_end;
3195         mab_set_b_end(mast->bn, mast->r, mast->r->node);
3196         if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3197                 cp = false;
3198 
3199         if (cp)
3200                 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3201                            mast->bn, mast->bn->b_end);
3202 
3203         mast->bn->b_end--;
3204         mast->bn->type = mte_node_type(mas->node);
3205 }
3206 
3207 /*
3208  * mast_split_data() - Split the data in the subtree state big node into regular
3209  * nodes.
3210  * @mast: The maple subtree state
3211  * @mas: The maple state
3212  * @split: The location to split the big node
3213  */
3214 static inline void mast_split_data(struct maple_subtree_state *mast,
3215            struct ma_state *mas, unsigned char split)
3216 {
3217         unsigned char p_slot;
3218 
3219         mab_mas_cp(mast->bn, 0, split, mast->l, true);
3220         mte_set_pivot(mast->r->node, 0, mast->r->max);
3221         mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3222         mast->l->offset = mte_parent_slot(mas->node);
3223         mast->l->max = mast->bn->pivot[split];
3224         mast->r->min = mast->l->max + 1;
3225         if (mte_is_leaf(mas->node))
3226                 return;
3227 
3228         p_slot = mast->orig_l->offset;
3229         mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3230                              &p_slot, split);
3231         mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3232                              &p_slot, split);
3233 }
3234 
3235 /*
3236  * mas_push_data() - Instead of splitting a node, push the data into the left
3237  * or right sibling if there is room.
3238  * @mas: The maple state
3239  * @height: The current height of the maple state
3240  * @mast: The maple subtree state
3241  * @left: Push left if true, push right otherwise.
3242  *
3243  * Keeping the height of the tree low means faster lookups.
3244  *
3245  * Return: True if pushed, false otherwise.
3246  */
3247 static inline bool mas_push_data(struct ma_state *mas, int height,
3248                                  struct maple_subtree_state *mast, bool left)
3249 {
3250         unsigned char slot_total = mast->bn->b_end;
3251         unsigned char end, space, split;
3252 
3253         MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3254         tmp_mas = *mas;
3255         tmp_mas.depth = mast->l->depth;
3256 
3257         if (left && !mas_prev_sibling(&tmp_mas))
3258                 return false;
3259         else if (!left && !mas_next_sibling(&tmp_mas))
3260                 return false;
3261 
3262         end = mas_data_end(&tmp_mas);
3263         slot_total += end;
3264         space = 2 * mt_slot_count(mas->node) - 2;
3265         /* -2 instead of -1 to ensure there isn't a triple split */
3266         if (ma_is_leaf(mast->bn->type))
3267                 space--;
3268 
3269         if (mas->max == ULONG_MAX)
3270                 space--;
3271 
3272         if (slot_total >= space)
3273                 return false;
3274 
3275         /* Get the data; Fill mast->bn */
3276         mast->bn->b_end++;
3277         if (left) {
3278                 mab_shift_right(mast->bn, end + 1);
3279                 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3280                 mast->bn->b_end = slot_total + 1;
3281         } else {
3282                 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3283         }
3284 
3285         /* Configure mast for splitting of mast->bn */
3286         split = mt_slots[mast->bn->type] - 2;
3287         if (left) {
3288                 /*  Switch mas to prev node  */
3289                 *mas = tmp_mas;
3290                 /* Start using mast->l for the left side. */
3291                 tmp_mas.node = mast->l->node;
3292                 *mast->l = tmp_mas;
3293         } else {
3294                 tmp_mas.node = mast->r->node;
3295                 *mast->r = tmp_mas;
3296                 split = slot_total - split;
3297         }
3298         split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3299         /* Update parent slot for split calculation. */
3300         if (left)
3301                 mast->orig_l->offset += end + 1;
3302 
3303         mast_split_data(mast, mas, split);
3304         mast_fill_bnode(mast, mas, 2);
3305         mas_split_final_node(mast, mas, height + 1);
3306         return true;
3307 }
3308 
3309 /*
3310  * mas_split() - Split data that is too big for one node into two.
3311  * @mas: The maple state
3312  * @b_node: The maple big node
3313  * Return: 1 on success, 0 on failure.
3314  */
3315 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3316 {
3317         struct maple_subtree_state mast;
3318         int height = 0;
3319         unsigned char mid_split, split = 0;
3320         struct maple_enode *old;
3321 
3322         /*
3323          * Splitting is handled differently from any other B-tree; the Maple
3324          * Tree splits upwards.  Splitting up means that the split operation
3325          * occurs when the walk of the tree hits the leaves and not on the way
3326          * down.  The reason for splitting up is that it is impossible to know
3327          * how much space will be needed until the leaf is (or leaves are)
3328          * reached.  Since overwriting data is allowed and a range could
3329          * overwrite more than one range or result in changing one entry into 3
3330          * entries, it is impossible to know if a split is required until the
3331          * data is examined.
3332          *
3333          * Splitting is a balancing act between keeping allocations to a minimum
3334          * and avoiding a 'jitter' event where a tree is expanded to make room
3335          * for an entry followed by a contraction when the entry is removed.  To
3336          * accomplish the balance, there are empty slots remaining in both left
3337          * and right nodes after a split.
3338          */
3339         MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3340         MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3341         MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3342         MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3343 
3344         trace_ma_op(__func__, mas);
3345         mas->depth = mas_mt_height(mas);
3346         /* Allocation failures will happen early. */
3347         mas_node_count(mas, 1 + mas->depth * 2);
3348         if (mas_is_err(mas))
3349                 return 0;
3350 
3351         mast.l = &l_mas;
3352         mast.r = &r_mas;
3353         mast.orig_l = &prev_l_mas;
3354         mast.orig_r = &prev_r_mas;
3355         mast.bn = b_node;
3356 
3357         while (height++ <= mas->depth) {
3358                 if (mt_slots[b_node->type] > b_node->b_end) {
3359                         mas_split_final_node(&mast, mas, height);
3360                         break;
3361                 }
3362 
3363                 l_mas = r_mas = *mas;
3364                 l_mas.node = mas_new_ma_node(mas, b_node);
3365                 r_mas.node = mas_new_ma_node(mas, b_node);
3366                 /*
3367                  * Another way that 'jitter' is avoided is to terminate a split up early if the
3368                  * left or right node has space to spare.  This is referred to as "pushing left"
3369                  * or "pushing right" and is similar to the B* tree, except the nodes left or
3370                  * right can rarely be reused due to RCU, but the ripple upwards is halted which
3371                  * is a significant savings.
3372                  */
3373                 /* Try to push left. */
3374                 if (mas_push_data(mas, height, &mast, true))
3375                         break;
3376                 /* Try to push right. */
3377                 if (mas_push_data(mas, height, &mast, false))
3378                         break;
3379 
3380                 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3381                 mast_split_data(&mast, mas, split);
3382                 /*
3383                  * mast.r->max is usually correct, but mab_mas_cp() in the
3384                  * call above overwrites it, so restore it from mas->max.
3385                  */
3386                 mast.r->max = mas->max;
3387                 mast_fill_bnode(&mast, mas, 1);
3388                 prev_l_mas = *mast.l;
3389                 prev_r_mas = *mast.r;
3390         }
3391 
3392         /* Set the original node as dead */
3393         old = mas->node;
3394         mas->node = l_mas.node;
3395         mas_wmb_replace(mas, old);
3396         mtree_range_walk(mas);
3397         return 1;
3398 }
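
/*
 * Illustrative sketch: splitting is not invoked directly; it is triggered by
 * a store into a full node.  Writing more disjoint ranges than one leaf can
 * hold (16 slots for a 64 bit range node) forces the split above.  The
 * function name is hypothetical.
 */
static void __maybe_unused example_force_split(struct maple_tree *mt)
{
	unsigned long i;

	/* Disjoint ranges cannot be merged, so a leaf must eventually split. */
	for (i = 0; i < 32; i++)
		mtree_store_range(mt, i * 10, i * 10 + 5, xa_mk_value(i),
				  GFP_KERNEL);
}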
3399 
3400 /*
3401  * mas_reuse_node() - Reuse the node to store the data.
3402  * @wr_mas: The maple write state
3403  * @bn: The maple big node
3404  * @end: The end of the data.
3405  *
3406  * Will always return false in RCU mode.
3407  *
3408  * Return: True if node was reused, false otherwise.
3409  */
3410 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3411                           struct maple_big_node *bn, unsigned char end)
3412 {
3413         /* Need to be rcu safe. */
3414         if (mt_in_rcu(wr_mas->mas->tree))
3415                 return false;
3416 
3417         if (end > bn->b_end) {
3418                 int clear = mt_slots[wr_mas->type] - bn->b_end;
3419 
3420                 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3421                 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3422         }
3423         mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3424         return true;
3425 }
3426 
3427 /*
3428  * mas_commit_b_node() - Commit the big node into the tree.
3429  * @wr_mas: The maple write state
3430  * @b_node: The maple big node
3431  * @end: The end of the data.
3432  */
3433 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3434                             struct maple_big_node *b_node, unsigned char end)
3435 {
3436         struct maple_node *node;
3437         struct maple_enode *old_enode;
3438         unsigned char b_end = b_node->b_end;
3439         enum maple_type b_type = b_node->type;
3440 
3441         old_enode = wr_mas->mas->node;
3442         if ((b_end < mt_min_slots[b_type]) &&
3443             (!mte_is_root(old_enode)) &&
3444             (mas_mt_height(wr_mas->mas) > 1))
3445                 return mas_rebalance(wr_mas->mas, b_node);
3446 
3447         if (b_end >= mt_slots[b_type])
3448                 return mas_split(wr_mas->mas, b_node);
3449 
3450         if (mas_reuse_node(wr_mas, b_node, end))
3451                 goto reuse_node;
3452 
3453         mas_node_count(wr_mas->mas, 1);
3454         if (mas_is_err(wr_mas->mas))
3455                 return 0;
3456 
3457         node = mas_pop_node(wr_mas->mas);
3458         node->parent = mas_mn(wr_mas->mas)->parent;
3459         wr_mas->mas->node = mt_mk_node(node, b_type);
3460         mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3461         mas_replace_node(wr_mas->mas, old_enode);
3462 reuse_node:
3463         mas_update_gap(wr_mas->mas);
3464         wr_mas->mas->end = b_end;
3465         return 1;
3466 }
3467 
3468 /*
3469  * mas_root_expand() - Expand a root to a node
3470  * @mas: The maple state
3471  * @entry: The entry to store into the tree
3472  */
3473 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3474 {
3475         void *contents = mas_root_locked(mas);
3476         enum maple_type type = maple_leaf_64;
3477         struct maple_node *node;
3478         void __rcu **slots;
3479         unsigned long *pivots;
3480         int slot = 0;
3481 
3482         mas_node_count(mas, 1);
3483         if (unlikely(mas_is_err(mas)))
3484                 return 0;
3485 
3486         node = mas_pop_node(mas);
3487         pivots = ma_pivots(node, type);
3488         slots = ma_slots(node, type);
3489         node->parent = ma_parent_ptr(mas_tree_parent(mas));
3490         mas->node = mt_mk_node(node, type);
3491         mas->status = ma_active;
3492 
3493         if (mas->index) {
3494                 if (contents) {
3495                         rcu_assign_pointer(slots[slot], contents);
3496                         if (likely(mas->index > 1))
3497                                 slot++;
3498                 }
3499                 pivots[slot++] = mas->index - 1;
3500         }
3501 
3502         rcu_assign_pointer(slots[slot], entry);
3503         mas->offset = slot;
3504         pivots[slot] = mas->last;
3505         if (mas->last != ULONG_MAX)
3506                 pivots[++slot] = ULONG_MAX;
3507 
3508         mas->depth = 1;
3509         mas_set_height(mas);
3510         ma_set_meta(node, maple_leaf_64, 0, slot);
3511         /* swap the new root into the tree */
3512         rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3513         return slot;
3514 }
3515 
3516 static inline void mas_store_root(struct ma_state *mas, void *entry)
3517 {
3518         if (likely((mas->last != 0) || (mas->index != 0)))
3519                 mas_root_expand(mas, entry);
3520         else if (((unsigned long) (entry) & 3) == 2) /* xa internal entry */
3521                 mas_root_expand(mas, entry);
3522         else {
3523                 rcu_assign_pointer(mas->tree->ma_root, entry);
3524                 mas->status = ma_start;
3525         }
3526 }
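
/*
 * Illustrative sketch: a single entry covering index 0 is kept directly in
 * ma_root without allocating a node; any other index (or an xa internal
 * value) expands the root into a leaf node via mas_root_expand().
 */
static void __maybe_unused example_root_store(struct maple_tree *mt, void *p)
{
	mtree_store(mt, 0, p, GFP_KERNEL);	/* stored in ma_root itself */
	mtree_store(mt, 5, p, GFP_KERNEL);	/* expands root to a leaf node */
}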
3527 
3528 /*
3529  * mas_is_span_wr() - Check if the write needs to be treated as a write that
3530  * spans the node.
3531  * @wr_mas: The maple write state
3535  *
3536  * Spanning writes are writes that start in one node and end in another, or
3537  * writes where storing a %NULL would cause the node to end with a %NULL.
3538  *
3539  * Return: True if this is a spanning write, false otherwise.
3540  */
3541 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3542 {
3543         unsigned long max = wr_mas->r_max;
3544         unsigned long last = wr_mas->mas->last;
3545         enum maple_type type = wr_mas->type;
3546         void *entry = wr_mas->entry;
3547 
3548         /* Contained in this pivot, fast path */
3549         if (last < max)
3550                 return false;
3551 
3552         if (ma_is_leaf(type)) {
3553                 max = wr_mas->mas->max;
3554                 if (last < max)
3555                         return false;
3556         }
3557 
3558         if (last == max) {
3559                 /*
3560                  * The last entry of leaf node cannot be NULL unless it is the
3561                  * rightmost node (writing ULONG_MAX), otherwise it spans slots.
3562                  */
3563                 if (entry || last == ULONG_MAX)
3564                         return false;
3565         }
3566 
3567         trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
3568         return true;
3569 }
3570 
3571 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3572 {
3573         wr_mas->type = mte_node_type(wr_mas->mas->node);
3574         mas_wr_node_walk(wr_mas);
3575         wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3576 }
3577 
3578 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3579 {
3580         wr_mas->mas->max = wr_mas->r_max;
3581         wr_mas->mas->min = wr_mas->r_min;
3582         wr_mas->mas->node = wr_mas->content;
3583         wr_mas->mas->offset = 0;
3584         wr_mas->mas->depth++;
3585 }
3586 /*
3587  * mas_wr_walk() - Walk the tree for a write.
3588  * @wr_mas: The maple write state
3589  *
3590  * Uses mas_slot_locked() and does not need to worry about dead nodes.
3591  *
3592  * Return: True if it's contained in a node, false on spanning write.
3593  */
3594 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3595 {
3596         struct ma_state *mas = wr_mas->mas;
3597 
3598         while (true) {
3599                 mas_wr_walk_descend(wr_mas);
3600                 if (unlikely(mas_is_span_wr(wr_mas)))
3601                         return false;
3602 
3603                 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3604                                                   mas->offset);
3605                 if (ma_is_leaf(wr_mas->type))
3606                         return true;
3607 
3608                 mas_wr_walk_traverse(wr_mas);
3609         }
3610 
3611         return true;
3612 }
3613 
3614 static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
3615 {
3616         struct ma_state *mas = wr_mas->mas;
3617 
3618         while (true) {
3619                 mas_wr_walk_descend(wr_mas);
3620                 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3621                                                   mas->offset);
3622                 if (ma_is_leaf(wr_mas->type))
3623                         return;
3624                 mas_wr_walk_traverse(wr_mas);
3625         }
3626 }
3627 /*
3628  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3629  * @l_wr_mas: The left maple write state
3630  * @r_wr_mas: The right maple write state
3631  */
3632 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3633                                             struct ma_wr_state *r_wr_mas)
3634 {
3635         struct ma_state *r_mas = r_wr_mas->mas;
3636         struct ma_state *l_mas = l_wr_mas->mas;
3637         unsigned char l_slot;
3638 
3639         l_slot = l_mas->offset;
3640         if (!l_wr_mas->content)
3641                 l_mas->index = l_wr_mas->r_min;
3642 
3643         if ((l_mas->index == l_wr_mas->r_min) &&
3644                  (l_slot &&
3645                   !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3646                 if (l_slot > 1)
3647                         l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3648                 else
3649                         l_mas->index = l_mas->min;
3650 
3651                 l_mas->offset = l_slot - 1;
3652         }
3653 
3654         if (!r_wr_mas->content) {
3655                 if (r_mas->last < r_wr_mas->r_max)
3656                         r_mas->last = r_wr_mas->r_max;
3657                 r_mas->offset++;
3658         } else if ((r_mas->last == r_wr_mas->r_max) &&
3659             (r_mas->last < r_mas->max) &&
3660             !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3661                 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3662                                              r_wr_mas->type, r_mas->offset + 1);
3663                 r_mas->offset++;
3664         }
3665 }
3666 
3667 static inline void *mas_state_walk(struct ma_state *mas)
3668 {
3669         void *entry;
3670 
3671         entry = mas_start(mas);
3672         if (mas_is_none(mas))
3673                 return NULL;
3674 
3675         if (mas_is_ptr(mas))
3676                 return entry;
3677 
3678         return mtree_range_walk(mas);
3679 }
3680 
3681 /*
3682  * mtree_lookup_walk() - Internal quick lookup that does not keep the maple
3683  * state up to date.
3684  *
3685  * @mas: The maple state.
3686  *
3687  * Note: Leaves @mas in an undesirable state.
3688  * Return: The entry for @mas->index or %NULL on dead node.
3689  */
3690 static inline void *mtree_lookup_walk(struct ma_state *mas)
3691 {
3692         unsigned long *pivots;
3693         unsigned char offset;
3694         struct maple_node *node;
3695         struct maple_enode *next;
3696         enum maple_type type;
3697         void __rcu **slots;
3698         unsigned char end;
3699 
3700         next = mas->node;
3701         do {
3702                 node = mte_to_node(next);
3703                 type = mte_node_type(next);
3704                 pivots = ma_pivots(node, type);
3705                 end = mt_pivots[type];
3706                 offset = 0;
3707                 do {
3708                         if (pivots[offset] >= mas->index)
3709                                 break;
3710                 } while (++offset < end);
3711 
3712                 slots = ma_slots(node, type);
3713                 next = mt_slot(mas->tree, slots, offset);
3714                 if (unlikely(ma_dead_node(node)))
3715                         goto dead_node;
3716         } while (!ma_is_leaf(type));
3717 
3718         return (void *)next;
3719 
3720 dead_node:
3721         mas_reset(mas);
3722         return NULL;
3723 }
3724 
3725 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
3726 /*
3727  * mas_new_root() - Create a new root node that only contains the entry passed
3728  * in.
3729  * @mas: The maple state
3730  * @entry: The entry to store.
3731  *
3732  * Only valid when @mas->index == 0 and @mas->last == ULONG_MAX.
3733  *
3734  * Return: 0 on error, 1 on success.
3735  */
3736 static inline int mas_new_root(struct ma_state *mas, void *entry)
3737 {
3738         struct maple_enode *root = mas_root_locked(mas);
3739         enum maple_type type = maple_leaf_64;
3740         struct maple_node *node;
3741         void __rcu **slots;
3742         unsigned long *pivots;
3743 
3744         if (!entry && !mas->index && mas->last == ULONG_MAX) {
3745                 mas->depth = 0;
3746                 mas_set_height(mas);
3747                 rcu_assign_pointer(mas->tree->ma_root, entry);
3748                 mas->status = ma_start;
3749                 goto done;
3750         }
3751 
3752         mas_node_count(mas, 1);
3753         if (mas_is_err(mas))
3754                 return 0;
3755 
3756         node = mas_pop_node(mas);
3757         pivots = ma_pivots(node, type);
3758         slots = ma_slots(node, type);
3759         node->parent = ma_parent_ptr(mas_tree_parent(mas));
3760         mas->node = mt_mk_node(node, type);
3761         mas->status = ma_active;
3762         rcu_assign_pointer(slots[0], entry);
3763         pivots[0] = mas->last;
3764         mas->depth = 1;
3765         mas_set_height(mas);
3766         rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3767 
3768 done:
3769         if (xa_is_node(root))
3770                 mte_destroy_walk(root, mas->tree);
3771 
3772         return 1;
3773 }
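
/*
 * Illustrative sketch: any store covering the full range 0..ULONG_MAX takes
 * the mas_new_root() path and destroys the old nodes; storing NULL over the
 * full range therefore empties the tree.
 */
static void __maybe_unused example_whole_range(struct maple_tree *mt, void *p)
{
	mtree_store_range(mt, 0, ULONG_MAX, p, GFP_KERNEL);	/* new root */
	mtree_store_range(mt, 0, ULONG_MAX, NULL, GFP_KERNEL);	/* empty tree */
}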
3774 /*
3775  * mas_wr_spanning_store() - Create a subtree with the store operation completed
3776  * and new nodes where necessary, then place the sub-tree in the actual tree.
3777  * Note that mas is expected to point to the node which caused the store to
3778  * span.
3779  * @wr_mas: The maple write state
3780  *
3781  * Return: 0 on error, positive on success.
3782  */
3783 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3784 {
3785         struct maple_subtree_state mast;
3786         struct maple_big_node b_node;
3787         struct ma_state *mas;
3788         unsigned char height;
3789 
3790         /* Left and Right side of spanning store */
3791         MA_STATE(l_mas, NULL, 0, 0);
3792         MA_STATE(r_mas, NULL, 0, 0);
3793         MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3794         MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3795 
3796         /*
3797          * A store operation that spans multiple nodes is called a spanning
3798          * store and is handled early in the store call stack by the function
3799          * mas_is_span_wr().  When a spanning store is identified, the maple
3800          * state is duplicated.  The first maple state walks the left tree path
3801          * to ``index``, the duplicate walks the right tree path to ``last``.
3802          * The data in the two nodes are combined into a single node, two nodes,
3803          * or possibly three nodes (see the 3-way split above).  A ``NULL``
3804          * written to the last entry of a node is considered a spanning store as
3805          * a rebalance is required for the operation to complete and an overflow
3806          * of data may happen.
3807          */
3808         mas = wr_mas->mas;
3809         trace_ma_op(__func__, mas);
3810 
3811         if (unlikely(!mas->index && mas->last == ULONG_MAX))
3812                 return mas_new_root(mas, wr_mas->entry);
3813         /*
3814          * Node rebalancing may occur due to this store, so there may be three new
3815          * entries per level plus a new root.
3816          */
3817         height = mas_mt_height(mas);
3818         mas_node_count(mas, 1 + height * 3);
3819         if (mas_is_err(mas))
3820                 return 0;
3821 
3822         /*
3823          * Set up right side.  Need to get to the next offset after the spanning
3824          * store to ensure it's not NULL and to combine both the next node and
3825          * the node with the start together.
3826          */
3827         r_mas = *mas;
3828         /* Avoid overflow, walk to next slot in the tree. */
3829         if (r_mas.last + 1)
3830                 r_mas.last++;
3831 
3832         r_mas.index = r_mas.last;
3833         mas_wr_walk_index(&r_wr_mas);
3834         r_mas.last = r_mas.index = mas->last;
3835 
3836         /* Set up left side. */
3837         l_mas = *mas;
3838         mas_wr_walk_index(&l_wr_mas);
3839 
3840         if (!wr_mas->entry) {
3841                 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
3842                 mas->offset = l_mas.offset;
3843                 mas->index = l_mas.index;
3844                 mas->last = l_mas.last = r_mas.last;
3845         }
3846 
3847         /* expanding NULLs may make this cover the entire range */
3848         if (!l_mas.index && r_mas.last == ULONG_MAX) {
3849                 mas_set_range(mas, 0, ULONG_MAX);
3850                 return mas_new_root(mas, wr_mas->entry);
3851         }
3852 
3853         memset(&b_node, 0, sizeof(struct maple_big_node));
3854         /* Copy l_mas and store the value in b_node. */
3855         mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
3856         /* Copy r_mas into b_node if there is anything to copy. */
3857         if (r_mas.max > r_mas.last)
3858                 mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
3859                            &b_node, b_node.b_end + 1);
3860         else
3861                 b_node.b_end++;
3862 
3863         /* Stop spanning searches by searching for just index. */
3864         l_mas.index = l_mas.last = mas->index;
3865 
3866         mast.bn = &b_node;
3867         mast.orig_l = &l_mas;
3868         mast.orig_r = &r_mas;
3869         /* Combine l_mas and r_mas and split them up evenly again. */
3870         return mas_spanning_rebalance(mas, &mast, height + 1);
3871 }
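
/*
 * Illustrative sketch: a spanning store as seen from the external API.  After
 * enough single-index stores the entries occupy many leaves, so a wide
 * overwrite starts in one node and ends in another and is resolved by
 * mas_wr_spanning_store().  The function name is hypothetical.
 */
static void __maybe_unused example_spanning_store(struct maple_tree *mt)
{
	unsigned long i;

	for (i = 0; i < 1000; i++)
		mtree_store(mt, i, xa_mk_value(i), GFP_KERNEL);

	/* Replaces entries across many leaves with a single range. */
	mtree_store_range(mt, 100, 899, xa_mk_value(0), GFP_KERNEL);
}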
3872 
3873 /*
3874  * mas_wr_node_store() - Attempt to store the value in a node
3875  * @wr_mas: The maple write state
3876  *
3877  * Attempts to reuse the node, but may allocate.
3878  *
3879  * Return: True if stored, false otherwise
3880  */
3881 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
3882                                      unsigned char new_end)
3883 {
3884         struct ma_state *mas = wr_mas->mas;
3885         void __rcu **dst_slots;
3886         unsigned long *dst_pivots;
3887         unsigned char dst_offset, offset_end = wr_mas->offset_end;
3888         struct maple_node reuse, *newnode;
3889         unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
3890         bool in_rcu = mt_in_rcu(mas->tree);
3891 
3892         /* Too little data would remain; let the slow path rebalance the node. */
3893         if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
3894             !(mas->mas_flags & MA_STATE_BULK))
3895                 return false;
3896 
3897         if (mas->last == wr_mas->end_piv)
3898                 offset_end++; /* don't copy this offset */
3899         else if (unlikely(wr_mas->r_max == ULONG_MAX))
3900                 mas_bulk_rebalance(mas, mas->end, wr_mas->type);
3901 
3902         /* set up node. */
3903         if (in_rcu) {
3904                 mas_node_count(mas, 1);
3905                 if (mas_is_err(mas))
3906                         return false;
3907 
3908                 newnode = mas_pop_node(mas);
3909         } else {
3910                 memset(&reuse, 0, sizeof(struct maple_node));
3911                 newnode = &reuse;
3912         }
3913 
3914         newnode->parent = mas_mn(mas)->parent;
3915         dst_pivots = ma_pivots(newnode, wr_mas->type);
3916         dst_slots = ma_slots(newnode, wr_mas->type);
3917         /* Copy from start to insert point */
3918         memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
3919         memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
3920 
3921         /* Handle insert of new range starting after old range */
3922         if (wr_mas->r_min < mas->index) {
3923                 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
3924                 dst_pivots[mas->offset++] = mas->index - 1;
3925         }
3926 
3927         /* Store the new entry and range end. */
3928         if (mas->offset < node_pivots)
3929                 dst_pivots[mas->offset] = mas->last;
3930         rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
3931 
3932         /*
3933          * This range wrote to the end of the node or it overwrote the rest
3934          * of the data.
3935          */
3936         if (offset_end > mas->end)
3937                 goto done;
3938 
3939         dst_offset = mas->offset + 1;
3940         /* Copy to the end of node if necessary. */
3941         copy_size = mas->end - offset_end + 1;
3942         memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
3943                sizeof(void *) * copy_size);
3944         memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
3945                sizeof(unsigned long) * (copy_size - 1));
3946 
3947         if (new_end < node_pivots)
3948                 dst_pivots[new_end] = mas->max;
3949 
3950 done:
3951         mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
3952         if (in_rcu) {
3953                 struct maple_enode *old_enode = mas->node;
3954 
3955                 mas->node = mt_mk_node(newnode, wr_mas->type);
3956                 mas_replace_node(mas, old_enode);
3957         } else {
3958                 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
3959         }
3960         trace_ma_write(__func__, mas, 0, wr_mas->entry);
3961         mas_update_gap(mas);
3962         mas->end = new_end;
3963         return true;
3964 }
3965 
3966 /*
3967  * mas_wr_slot_store() - Attempt to store a value in a slot.
3968  * @wr_mas: The maple write state
3969  *
3970  * Return: True if stored, false otherwise
3971  */
3972 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
3973 {
3974         struct ma_state *mas = wr_mas->mas;
3975         unsigned char offset = mas->offset;
3976         void __rcu **slots = wr_mas->slots;
3977         bool gap = false;
3978 
3979         gap |= !mt_slot_locked(mas->tree, slots, offset);
3980         gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
3981 
3982         if (wr_mas->offset_end - offset == 1) {
3983                 if (mas->index == wr_mas->r_min) {
3984                         /* Overwriting the range and a part of the next one */
3985                         rcu_assign_pointer(slots[offset], wr_mas->entry);
3986                         wr_mas->pivots[offset] = mas->last;
3987                 } else {
3988                         /* Overwriting a part of the range and the next one */
3989                         rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
3990                         wr_mas->pivots[offset] = mas->index - 1;
3991                         mas->offset++; /* Keep mas accurate. */
3992                 }
3993         } else if (!mt_in_rcu(mas->tree)) {
3994                 /*
3995                  * Expand the range, only partially overwriting the previous and
3996                  * next ranges
3997                  */
3998                 gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
3999                 rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4000                 wr_mas->pivots[offset] = mas->index - 1;
4001                 wr_mas->pivots[offset + 1] = mas->last;
4002                 mas->offset++; /* Keep mas accurate. */
4003         } else {
4004                 return false;
4005         }
4006 
4007         trace_ma_write(__func__, mas, 0, wr_mas->entry);
4008         /*
4009          * Only update gap when the new entry is empty or there is an empty
4010          * entry in the original two ranges.
4011          */
4012         if (!wr_mas->entry || gap)
4013                 mas_update_gap(mas);
4014 
4015         return true;
4016 }
4017 
4018 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4019 {
4020         struct ma_state *mas = wr_mas->mas;
4021 
4022         if (!wr_mas->slots[wr_mas->offset_end]) {
4023                 /* This slot is NULL; NULLs cannot be adjacent, so cover it fully */
4024                 mas->last = wr_mas->end_piv;
4025         } else {
4026                 /* Check next slot(s) if we are overwriting the end */
4027                 if ((mas->last == wr_mas->end_piv) &&
4028                     (mas->end != wr_mas->offset_end) &&
4029                     !wr_mas->slots[wr_mas->offset_end + 1]) {
4030                         wr_mas->offset_end++;
4031                         if (wr_mas->offset_end == mas->end)
4032                                 mas->last = mas->max;
4033                         else
4034                                 mas->last = wr_mas->pivots[wr_mas->offset_end];
4035                         wr_mas->end_piv = mas->last;
4036                 }
4037         }
4038 
4039         if (!wr_mas->content) {
4040                 /* This slot is NULL; NULLs cannot be adjacent, so cover it fully */
4041                 mas->index = wr_mas->r_min;
4042         } else {
4043                 /* Check prev slot if we are overwriting the start */
4044                 if (mas->index == wr_mas->r_min && mas->offset &&
4045                     !wr_mas->slots[mas->offset - 1]) {
4046                         mas->offset--;
4047                         wr_mas->r_min = mas->index =
4048                                 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4049                         wr_mas->r_max = wr_mas->pivots[mas->offset];
4050                 }
4051         }
4052 }
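
/*
 * Illustrative sketch: because NULL writes are extended over neighbouring
 * NULL slots, back-to-back erases do not accumulate adjacent empty slots;
 * the second store below is widened internally to merge with the first.
 */
static void __maybe_unused example_null_merge(struct maple_tree *mt)
{
	unsigned long i;

	for (i = 0; i < 40; i++)
		mtree_store(mt, i, xa_mk_value(i), GFP_KERNEL);

	mtree_store_range(mt, 10, 19, NULL, GFP_KERNEL);
	mtree_store_range(mt, 20, 29, NULL, GFP_KERNEL);	/* merged with 10-19 */
}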
4053 
4054 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4055 {
4056         while ((wr_mas->offset_end < wr_mas->mas->end) &&
4057                (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4058                 wr_mas->offset_end++;
4059 
4060         if (wr_mas->offset_end < wr_mas->mas->end)
4061                 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4062         else
4063                 wr_mas->end_piv = wr_mas->mas->max;
4064 
4065         if (!wr_mas->entry)
4066                 mas_wr_extend_null(wr_mas);
4067 }
4068 
4069 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4070 {
4071         struct ma_state *mas = wr_mas->mas;
4072         unsigned char new_end = mas->end + 2;
4073 
4074         new_end -= wr_mas->offset_end - mas->offset;
4075         if (wr_mas->r_min == mas->index)
4076                 new_end--;
4077 
4078         if (wr_mas->end_piv == mas->last)
4079                 new_end--;
4080 
4081         return new_end;
4082 }
4083 
4084 /*
4085  * mas_wr_append() - Attempt to append to the end of a node
4086  * @wr_mas: The maple write state
4087  * @new_end: The end of the node after the modification
4088  *
4089  * This is currently unsafe in RCU mode: readers may have cached the end of
4090  * the node while its contents are being updated, which could result in
4091  * inaccurate information.
4092  *
4093  * Return: True if appended, false otherwise
4094  */
4095 static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
4096                 unsigned char new_end)
4097 {
4098         struct ma_state *mas;
4099         void __rcu **slots;
4100         unsigned char end;
4101 
4102         mas = wr_mas->mas;
4103         if (mt_in_rcu(mas->tree))
4104                 return false;
4105 
4106         end = mas->end;
4107         if (mas->offset != end)
4108                 return false;
4109 
4110         if (new_end < mt_pivots[wr_mas->type]) {
4111                 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4112                 ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
4113         }
4114 
4115         slots = wr_mas->slots;
4116         if (new_end == end + 1) {
4117                 if (mas->last == wr_mas->r_max) {
4118                         /* Append to end of range */
4119                         rcu_assign_pointer(slots[new_end], wr_mas->entry);
4120                         wr_mas->pivots[end] = mas->index - 1;
4121                         mas->offset = new_end;
4122                 } else {
4123                         /* Append to start of range */
4124                         rcu_assign_pointer(slots[new_end], wr_mas->content);
4125                         wr_mas->pivots[end] = mas->last;
4126                         rcu_assign_pointer(slots[end], wr_mas->entry);
4127                 }
4128         } else {
4129                 /* Append to the range without touching any boundaries. */
4130                 rcu_assign_pointer(slots[new_end], wr_mas->content);
4131                 wr_mas->pivots[end + 1] = mas->last;
4132                 rcu_assign_pointer(slots[end + 1], wr_mas->entry);
4133                 wr_mas->pivots[end] = mas->index - 1;
4134                 mas->offset = end + 1;
4135         }
4136 
4137         if (!wr_mas->content || !wr_mas->entry)
4138                 mas_update_gap(mas);
4139 
4140         mas->end = new_end;
4141         trace_ma_write(__func__, mas, new_end, wr_mas->entry);
4142         return  true;
4143 }
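
/*
 * Illustrative sketch: strictly ascending stores in a non-RCU tree land on
 * the append path above and reuse the leaf in place instead of allocating
 * a replacement node.  The helper name is hypothetical.
 */
static void __maybe_unused example_append_pattern(struct maple_tree *mt)
{
	unsigned long i;

	for (i = 0; i < 10; i++)
		mtree_store_range(mt, i * 10, i * 10 + 9, xa_mk_value(i),
				  GFP_KERNEL);
}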
4144 
4145 /*
4146  * mas_wr_bnode() - Slow path for a modification.
4147  * @wr_mas: The maple write state
4148  *
4149  * This is where splits and rebalances end up.
4150  */
4151 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4152 {
4153         struct maple_big_node b_node;
4154 
4155         trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4156         memset(&b_node, 0, sizeof(struct maple_big_node));
4157         mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4158         mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
4159 }
4160 
4161 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4162 {
4163         struct ma_state *mas = wr_mas->mas;
4164         unsigned char new_end;
4165 
4166         /* Direct replacement */
4167         if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4168                 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4169                 if (!!wr_mas->entry ^ !!wr_mas->content)
4170                         mas_update_gap(mas);
4171                 return;
4172         }
4173 
4174         /*
4175          * If new_end exceeds the size of the maple node, the write cannot
4176          * take the fast path.
4177          */
4178         new_end = mas_wr_new_end(wr_mas);
4179         if (new_end >= mt_slots[wr_mas->type])
4180                 goto slow_path;
4181 
4182         /* Attempt to append */
4183         if (mas_wr_append(wr_mas, new_end))
4184                 return;
4185 
4186         if (new_end == mas->end && mas_wr_slot_store(wr_mas))
4187                 return;
4188 
4189         if (mas_wr_node_store(wr_mas, new_end))
4190                 return;
4191 
4192         if (mas_is_err(mas))
4193                 return;
4194 
4195 slow_path:
4196         mas_wr_bnode(wr_mas);
4197 }
4198 
4199 /*
4200  * mas_wr_store_entry() - Internal call to store a value
4201  * @wr_mas: The maple write state
4202  *
4203  * The entry that previously occupied the index is saved in wr_mas->content.
4204  */
4206 static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
4207 {
4208         struct ma_state *mas = wr_mas->mas;
4209 
4210         wr_mas->content = mas_start(mas);
4211         if (mas_is_none(mas) || mas_is_ptr(mas)) {
4212                 mas_store_root(mas, wr_mas->entry);
4213                 return;
4214         }
4215 
4216         if (unlikely(!mas_wr_walk(wr_mas))) {
4217                 mas_wr_spanning_store(wr_mas);
4218                 return;
4219         }
4220 
4221         /* At this point, we are at the leaf node that needs to be altered. */
4222         mas_wr_end_piv(wr_mas);
4223         /* New root for a single pointer */
4224         if (unlikely(!mas->index && mas->last == ULONG_MAX))
4225                 mas_new_root(mas, wr_mas->entry);
4226         else
4227                 mas_wr_modify(wr_mas);
4228 }
4229 
4230 /**
4231  * mas_insert() - Internal call to insert a value
4232  * @mas: The maple state
4233  * @entry: The entry to store
4234  *
4235  * Return: %NULL on success, otherwise the contents that already exist at the
4236  * requested index.  The maple state needs to be checked for error conditions.
4237  */
4238 static inline void *mas_insert(struct ma_state *mas, void *entry)
4239 {
4240         MA_WR_STATE(wr_mas, mas, entry);
4241 
4242         /*
4243          * Inserting a new range inserts either 0, 1, or 2 pivots within the
4244          * tree.  If the insert fits exactly into an existing gap with a value
4245          * of NULL, then the slot only needs to be written with the new value.
4246          * If the range being inserted is adjacent to another range, then only a
4247          * single pivot needs to be inserted (as well as writing the entry).  If
4248          * the new range is within a gap but does not touch any other ranges,
4249          * then two pivots need to be inserted: the start - 1, and the end.  As
4250          * usual, the entry must be written.  Most operations require a new node
4251          * to be allocated and replace an existing node to ensure RCU safety,
4252          * when in RCU mode.  The exception to requiring a newly allocated node
4253          * is when inserting at the end of a node (appending).  When done
4254          * carefully, appending can reuse the node in place.
4255          */
4256         wr_mas.content = mas_start(mas);
4257         if (wr_mas.content)
4258                 goto exists;
4259 
4260         if (mas_is_none(mas) || mas_is_ptr(mas)) {
4261                 mas_store_root(mas, entry);
4262                 return NULL;
4263         }
4264 
4265         /* spanning writes always overwrite something */
4266         if (!mas_wr_walk(&wr_mas))
4267                 goto exists;
4268 
4269         /* At this point, we are at the leaf node that needs to be altered. */
4270         wr_mas.offset_end = mas->offset;
4271         wr_mas.end_piv = wr_mas.r_max;
4272 
4273         if (wr_mas.content || (mas->last > wr_mas.r_max))
4274                 goto exists;
4275 
4276         if (!entry)
4277                 return NULL;
4278 
4279         mas_wr_modify(&wr_mas);
4280         return wr_mas.content;
4281 
4282 exists:
4283         mas_set_err(mas, -EEXIST);
4284         return wr_mas.content;
4285 
4286 }
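
/*
 * Illustrative sketch: the insert variants refuse to overwrite.  The second
 * call below fails with -EEXIST because the requested range overlaps a live
 * entry, while a store would simply replace it.  The helper is hypothetical.
 */
static int __maybe_unused example_insert_semantics(struct maple_tree *mt,
						   void *p)
{
	int ret;

	ret = mtree_insert_range(mt, 0, 9, p, GFP_KERNEL);
	if (ret)
		return ret;

	return mtree_insert_range(mt, 5, 7, p, GFP_KERNEL);	/* -EEXIST */
}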
4287 
4288 /**
4289  * mas_alloc_cyclic() - Internal call to find somewhere to store an entry
4290  * @mas: The maple state.
4291  * @startp: Pointer to ID.
4292  * @entry: The entry to store.
4293  * @range_lo: Lower bound of range to search.
4294  * @range_hi: Upper bound of range to search.
4295  * @next: Pointer to next ID to allocate.
4296  * @gfp: The GFP_FLAGS to use for allocations.
4297  *
4298  * Return: 0 if the allocation succeeded without wrapping, 1 if the
4299  * allocation succeeded after wrapping, or -EBUSY if there are no
4300  * free entries.
4301  */
4302 int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
4303                 void *entry, unsigned long range_lo, unsigned long range_hi,
4304                 unsigned long *next, gfp_t gfp)
4305 {
4306         unsigned long min = range_lo;
4307         int ret = 0;
4308 
4309         range_lo = max(min, *next);
4310         ret = mas_empty_area(mas, range_lo, range_hi, 1);
4311         if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) {
4312                 mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED;
4313                 ret = 1;
4314         }
4315         if (ret < 0 && range_lo > min) {
4316                 ret = mas_empty_area(mas, min, range_hi, 1);
4317                 if (ret == 0)
4318                         ret = 1;
4319         }
4320         if (ret < 0)
4321                 return ret;
4322 
4323         do {
4324                 mas_insert(mas, entry);
4325         } while (mas_nomem(mas, gfp));
4326         if (mas_is_err(mas))
4327                 return xa_err(mas->node);
4328 
4329         *startp = mas->index;
4330         *next = *startp + 1;
4331         if (*next == 0)
4332                 mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
4333 
4334         return ret;
4335 }
4336 EXPORT_SYMBOL(mas_alloc_cyclic);
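
/*
 * Illustrative sketch: cyclic ID allocation through the external wrapper
 * mtree_alloc_cyclic(), which takes the tree lock itself.  @next carries the
 * rotor between calls; the tree is assumed to be an allocation tree
 * (MT_FLAGS_ALLOC_RANGE).
 */
static int __maybe_unused example_alloc_cyclic(struct maple_tree *mt, void *p,
					       unsigned long *next)
{
	unsigned long id;
	int ret;

	/* Allocate an ID in [1, 1023], continuing after the previous one. */
	ret = mtree_alloc_cyclic(mt, &id, p, 1, 1023, next, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* -EBUSY: no free entries */

	return 0;
}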
4337 
4338 static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4339 {
4340 retry:
4341         mas_set(mas, index);
4342         mas_state_walk(mas);
4343         if (mas_is_start(mas))
4344                 goto retry;
4345 }
4346 
4347 static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
4348                 struct maple_node *node, const unsigned long index)
4349 {
4350         if (unlikely(ma_dead_node(node))) {
4351                 mas_rewalk(mas, index);
4352                 return true;
4353         }
4354         return false;
4355 }
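
/*
 * Illustrative sketch: the rewalk above is what lets lockless readers run
 * concurrently with writers.  A reader only needs rcu_read_lock(); if it
 * lands on a node that a writer has marked dead, the walk restarts from the
 * saved index.  The helper name is hypothetical.
 */
static void __maybe_unused example_rcu_reader(struct maple_tree *mt)
{
	void *entry;

	MA_STATE(mas, mt, 0, 0);

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* Each iteration sees a live entry; dead nodes cause a rewalk. */
	}
	rcu_read_unlock();
}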
4356 
4357 /*
4358  * mas_prev_node() - Find the prev non-null entry at the same level in the
4359  * tree.
4361  * @mas: The maple state
4362  * @min: The lower limit to search
4363  *
4364  * The prev node value will be mas->node[mas->offset] or the status will be
4365  * ma_none.
4366  * Return: 1 if the node is dead, 0 otherwise.
4367  */
4368 static int mas_prev_node(struct ma_state *mas, unsigned long min)
4369 {
4370         enum maple_type mt;
4371         int offset, level;
4372         void __rcu **slots;
4373         struct maple_node *node;
4374         unsigned long *pivots;
4375         unsigned long max;
4376 
4377         node = mas_mn(mas);
4378         if (!mas->min)
4379                 goto no_entry;
4380 
4381         max = mas->min - 1;
4382         if (max < min)
4383                 goto no_entry;
4384 
4385         level = 0;
4386         do {
4387                 if (ma_is_root(node))
4388                         goto no_entry;
4389 
4390                 /* Walk up. */
4391                 if (unlikely(mas_ascend(mas)))
4392                         return 1;
4393                 offset = mas->offset;
4394                 level++;
4395                 node = mas_mn(mas);
4396         } while (!offset);
4397 
4398         offset--;
4399         mt = mte_node_type(mas->node);
4400         while (level > 1) {
4401                 level--;
4402                 slots = ma_slots(node, mt);
4403                 mas->node = mas_slot(mas, slots, offset);
4404                 if (unlikely(ma_dead_node(node)))
4405                         return 1;
4406 
4407                 mt = mte_node_type(mas->node);
4408                 node = mas_mn(mas);
4409                 pivots = ma_pivots(node, mt);
4410                 offset = ma_data_end(node, mt, pivots, max);
4411                 if (unlikely(ma_dead_node(node)))
4412                         return 1;
4413         }
4414 
4415         slots = ma_slots(node, mt);
4416         mas->node = mas_slot(mas, slots, offset);
4417         pivots = ma_pivots(node, mt);
4418         if (unlikely(ma_dead_node(node)))
4419                 return 1;
4420 
4421         if (likely(offset))
4422                 mas->min = pivots[offset - 1] + 1;
4423         mas->max = max;
4424         mas->offset = mas_data_end(mas);
4425         if (unlikely(mte_dead_node(mas->node)))
4426                 return 1;
4427 
4428         mas->end = mas->offset;
4429         return 0;
4430 
4431 no_entry:
4432         if (unlikely(ma_dead_node(node)))
4433                 return 1;
4434 
4435         mas->status = ma_underflow;
4436         return 0;
4437 }
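
/*
 * Illustrative sketch: mas_prev_node() is the internal engine behind reverse
 * iteration.  Externally the same walk is driven with mas_prev(), here from
 * the top of the range down to a lower limit.  The helper is hypothetical.
 */
static void __maybe_unused example_reverse_iteration(struct maple_tree *mt)
{
	void *entry;

	MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);

	rcu_read_lock();
	while ((entry = mas_prev(&mas, 0)) != NULL) {
		/* Visits entries in descending index order. */
	}
	rcu_read_unlock();
}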
4438 
4439 /*
4440  * mas_prev_slot() - Get the entry in the previous slot
4441  *
4442  * @mas: The maple state
4443  * @min: The lower limit to search
4444  * @empty: Can be empty
4446  *
4447  * Return: The entry in the previous slot which is possibly NULL
4448  */
4449 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
4450 {
4451         void *entry;
4452         void __rcu **slots;
4453         unsigned long pivot;
4454         enum maple_type type;
4455         unsigned long *pivots;
4456         struct maple_node *node;
4457         unsigned long save_point = mas->index;
4458 
4459 retry:
4460         node = mas_mn(mas);
4461         type = mte_node_type(mas->node);
4462         pivots = ma_pivots(node, type);
4463         if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4464                 goto retry;
4465 
4466         if (mas->min <= min) {
4467                 pivot = mas_safe_min(mas, pivots, mas->offset);
4468 
4469                 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4470                         goto retry;
4471 
4472                 if (pivot <= min)
4473                         goto underflow;
4474         }
4475 
4476 again:
4477         if (likely(mas->offset)) {
4478                 mas->offset--;
4479                 mas->last = mas->index - 1;
4480                 mas->index = mas_safe_min(mas, pivots, mas->offset);
4481         } else  {
4482                 if (mas->index <= min)
4483                         goto underflow;
4484 
4485                 if (mas_prev_node(mas, min)) {
4486                         mas_rewalk(mas, save_point);
4487                         goto retry;
4488                 }
4489 
4490                 if (WARN_ON_ONCE(mas_is_underflow(mas)))
4491                         return NULL;
4492 
4493                 mas->last = mas->max;
4494                 node = mas_mn(mas);
4495                 type = mte_node_type(mas->node);
4496                 pivots = ma_pivots(node, type);
4497                 mas->index = pivots[mas->offset - 1] + 1;
4498         }
4499 
4500         slots = ma_slots(node, type);
4501         entry = mas_slot(mas, slots, mas->offset);
4502         if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4503                 goto retry;
4504 
4506         if (likely(entry))
4507                 return entry;
4508 
4509         if (!empty) {
4510                 if (mas->index <= min) {
4511                         mas->status = ma_underflow;
4512                         return NULL;
4513                 }
4514 
4515                 goto again;
4516         }
4517 
4518         return entry;
4519 
4520 underflow:
4521         mas->status = ma_underflow;
4522         return NULL;
4523 }
4524 
4525 /*
4526  * mas_next_node() - Get the next node at the same level in the tree.
4527  * @mas: The maple state
      * @node: The maple node
4528  * @max: The maximum pivot value to check.
4529  *
4530  * The next value will be mas->node[mas->offset] or the status will have
4531  * overflowed.
4532  * Return: 1 on dead node, 0 otherwise.
4533  */
4534 static int mas_next_node(struct ma_state *mas, struct maple_node *node,
4535                 unsigned long max)
4536 {
4537         unsigned long min;
4538         unsigned long *pivots;
4539         struct maple_enode *enode;
4540         struct maple_node *tmp;
4541         int level = 0;
4542         unsigned char node_end;
4543         enum maple_type mt;
4544         void __rcu **slots;
4545 
4546         if (mas->max >= max)
4547                 goto overflow;
4548 
4549         min = mas->max + 1;
4550         level = 0;
4551         do {
4552                 if (ma_is_root(node))
4553                         goto overflow;
4554 
4555                 /* Walk up. */
4556                 if (unlikely(mas_ascend(mas)))
4557                         return 1;
4558 
4559                 level++;
4560                 node = mas_mn(mas);
4561                 mt = mte_node_type(mas->node);
4562                 pivots = ma_pivots(node, mt);
4563                 node_end = ma_data_end(node, mt, pivots, mas->max);
4564                 if (unlikely(ma_dead_node(node)))
4565                         return 1;
4566 
4567         } while (unlikely(mas->offset == node_end));
4568 
4569         slots = ma_slots(node, mt);
4570         mas->offset++;
4571         enode = mas_slot(mas, slots, mas->offset);
4572         if (unlikely(ma_dead_node(node)))
4573                 return 1;
4574 
4575         if (level > 1)
4576                 mas->offset = 0;
4577 
4578         while (unlikely(level > 1)) {
4579                 level--;
4580                 mas->node = enode;
4581                 node = mas_mn(mas);
4582                 mt = mte_node_type(mas->node);
4583                 slots = ma_slots(node, mt);
4584                 enode = mas_slot(mas, slots, 0);
4585                 if (unlikely(ma_dead_node(node)))
4586                         return 1;
4587         }
4588 
4589         if (!mas->offset)
4590                 pivots = ma_pivots(node, mt);
4591 
4592         mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4593         tmp = mte_to_node(enode);
4594         mt = mte_node_type(enode);
4595         pivots = ma_pivots(tmp, mt);
4596         mas->end = ma_data_end(tmp, mt, pivots, mas->max);
4597         if (unlikely(ma_dead_node(node)))
4598                 return 1;
4599 
4600         mas->node = enode;
4601         mas->min = min;
4602         return 0;
4603 
4604 overflow:
4605         if (unlikely(ma_dead_node(node)))
4606                 return 1;
4607 
4608         mas->status = ma_overflow;
4609         return 0;
4610 }
4611 
4612 /*
4613  * mas_next_slot() - Get the entry in the next slot
4614  *
4615  * @mas: The maple state
4616  * @max: The maximum starting range
4617  * @empty: Can be empty
4620  *
4621  * Return: The entry in the next slot which is possibly NULL
4622  */
4623 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
4624 {
4625         void __rcu **slots;
4626         unsigned long *pivots;
4627         unsigned long pivot;
4628         enum maple_type type;
4629         struct maple_node *node;
4630         unsigned long save_point = mas->last;
4631         void *entry;
4632 
4633 retry:
4634         node = mas_mn(mas);
4635         type = mte_node_type(mas->node);
4636         pivots = ma_pivots(node, type);
4637         if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4638                 goto retry;
4639 
4640         if (mas->max >= max) {
4641                 if (likely(mas->offset < mas->end))
4642                         pivot = pivots[mas->offset];
4643                 else
4644                         pivot = mas->max;
4645 
4646                 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4647                         goto retry;
4648 
4649                 if (pivot >= max) { /* Was at the limit, next will extend beyond */
4650                         mas->status = ma_overflow;
4651                         return NULL;
4652                 }
4653         }
4654 
4655         if (likely(mas->offset < mas->end)) {
4656                 mas->index = pivots[mas->offset] + 1;
4657 again:
4658                 mas->offset++;
4659                 if (likely(mas->offset < mas->end))
4660                         mas->last = pivots[mas->offset];
4661                 else
4662                         mas->last = mas->max;
4663         } else {
4664                 if (mas->last >= max) {
4665                         mas->status = ma_overflow;
4666                         return NULL;
4667                 }
4668 
4669                 if (mas_next_node(mas, node, max)) {
4670                         mas_rewalk(mas, save_point);
4671                         goto retry;
4672                 }
4673 
4674                 if (WARN_ON_ONCE(mas_is_overflow(mas)))
4675                         return NULL;
4676 
4677                 mas->offset = 0;
4678                 mas->index = mas->min;
4679                 node = mas_mn(mas);
4680                 type = mte_node_type(mas->node);
4681                 pivots = ma_pivots(node, type);
4682                 mas->last = pivots[0];
4683         }
4684 
4685         slots = ma_slots(node, type);
4686         entry = mt_slot(mas->tree, slots, mas->offset);
4687         if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4688                 goto retry;
4689 
4690         if (entry)
4691                 return entry;
4692 
4694         if (!empty) {
4695                 if (mas->last >= max) {
4696                         mas->status = ma_overflow;
4697                         return NULL;
4698                 }
4699 
4700                 mas->index = mas->last + 1;
4701                 goto again;
4702         }
4703 
4704         return entry;
4705 }
4706 
4707 /*
4708  * mas_next_entry() - Internal function to get the next entry.
4709  * @mas: The maple state
4710  * @limit: The maximum range start.
4711  *
4712  * Set the @mas->node to the next entry and the range_start to
4713  * the beginning value for the entry.  Does not check beyond @limit.
4714  * Sets @mas->index and @mas->last to the range.  Does not update them on
4715  * overflow.
4716  * Restarts on dead nodes.
4717  *
4718  * Return: the next entry or %NULL.
4719  */
4720 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4721 {
4722         if (mas->last >= limit) {
4723                 mas->status = ma_overflow;
4724                 return NULL;
4725         }
4726 
4727         return mas_next_slot(mas, limit, false);
4728 }
4729 
4730 /*
4731  * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
4732  * highest gap address of a given size in a given node and descend.
4733  * @mas: The maple state
4734  * @size: The needed size.
 * @gap_min: Set to the lowest address of the gap when found in a leaf
 * @gap_max: Set to the highest address of the gap when found in a leaf
4735  *
4736  * Return: True if found in a leaf, false otherwise.
4738  */
4739 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4740                 unsigned long *gap_min, unsigned long *gap_max)
4741 {
4742         enum maple_type type = mte_node_type(mas->node);
4743         struct maple_node *node = mas_mn(mas);
4744         unsigned long *pivots, *gaps;
4745         void __rcu **slots;
4746         unsigned long gap = 0;
4747         unsigned long max, min;
4748         unsigned char offset;
4749 
4750         if (unlikely(mas_is_err(mas)))
4751                 return true;
4752 
4753         if (ma_is_dense(type)) {
4754                 /* dense nodes. */
4755                 mas->offset = (unsigned char)(mas->index - mas->min);
4756                 return true;
4757         }
4758 
4759         pivots = ma_pivots(node, type);
4760         slots = ma_slots(node, type);
4761         gaps = ma_gaps(node, type);
4762         offset = mas->offset;
4763         min = mas_safe_min(mas, pivots, offset);
4764         /* Skip out of bounds. */
4765         while (mas->last < min)
4766                 min = mas_safe_min(mas, pivots, --offset);
4767 
4768         max = mas_safe_pivot(mas, pivots, offset, type);
4769         while (mas->index <= max) {
4770                 gap = 0;
4771                 if (gaps)
4772                         gap = gaps[offset];
4773                 else if (!mas_slot(mas, slots, offset))
4774                         gap = max - min + 1;
4775 
4776                 if (gap) {
4777                         if ((size <= gap) && (size <= mas->last - min + 1))
4778                                 break;
4779 
4780                         if (!gaps) {
4781                                 /* Skip the next slot, it cannot be a gap. */
4782                                 if (offset < 2)
4783                                         goto ascend;
4784 
4785                                 offset -= 2;
4786                                 max = pivots[offset];
4787                                 min = mas_safe_min(mas, pivots, offset);
4788                                 continue;
4789                         }
4790                 }
4791 
4792                 if (!offset)
4793                         goto ascend;
4794 
4795                 offset--;
4796                 max = min - 1;
4797                 min = mas_safe_min(mas, pivots, offset);
4798         }
4799 
4800         if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4801                 goto no_space;
4802 
4803         if (unlikely(ma_is_leaf(type))) {
4804                 mas->offset = offset;
4805                 *gap_min = min;
4806                 *gap_max = min + gap - 1;
4807                 return true;
4808         }
4809 
4810         /* descend, only happens under lock. */
4811         mas->node = mas_slot(mas, slots, offset);
4812         mas->min = min;
4813         mas->max = max;
4814         mas->offset = mas_data_end(mas);
4815         return false;
4816 
4817 ascend:
4818         if (!mte_is_root(mas->node))
4819                 return false;
4820 
4821 no_space:
4822         mas_set_err(mas, -EBUSY);
4823         return false;
4824 }
4825 
4826 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4827 {
4828         enum maple_type type = mte_node_type(mas->node);
4829         unsigned long pivot, min, gap = 0;
4830         unsigned char offset, data_end;
4831         unsigned long *gaps, *pivots;
4832         void __rcu **slots;
4833         struct maple_node *node;
4834         bool found = false;
4835 
4836         if (ma_is_dense(type)) {
4837                 mas->offset = (unsigned char)(mas->index - mas->min);
4838                 return true;
4839         }
4840 
4841         node = mas_mn(mas);
4842         pivots = ma_pivots(node, type);
4843         slots = ma_slots(node, type);
4844         gaps = ma_gaps(node, type);
4845         offset = mas->offset;
4846         min = mas_safe_min(mas, pivots, offset);
4847         data_end = ma_data_end(node, type, pivots, mas->max);
4848         for (; offset <= data_end; offset++) {
4849                 pivot = mas_safe_pivot(mas, pivots, offset, type);
4850 
4851                 /* Not within lower bounds */
4852                 if (mas->index > pivot)
4853                         goto next_slot;
4854 
4855                 if (gaps)
4856                         gap = gaps[offset];
4857                 else if (!mas_slot(mas, slots, offset))
4858                         gap = min(pivot, mas->last) - max(mas->index, min) + 1;
4859                 else
4860                         goto next_slot;
4861 
4862                 if (gap >= size) {
4863                         if (ma_is_leaf(type)) {
4864                                 found = true;
4865                                 goto done;
4866                         }
4867                         if (mas->index <= pivot) {
4868                                 mas->node = mas_slot(mas, slots, offset);
4869                                 mas->min = min;
4870                                 mas->max = pivot;
4871                                 offset = 0;
4872                                 break;
4873                         }
4874                 }
4875 next_slot:
4876                 min = pivot + 1;
4877                 if (mas->last <= pivot) {
4878                         mas_set_err(mas, -EBUSY);
4879                         return true;
4880                 }
4881         }
4882 
4883         if (mte_is_root(mas->node))
4884                 found = true;
4885 done:
4886         mas->offset = offset;
4887         return found;
4888 }
4889 
4890 /**
4891  * mas_walk() - Search for @mas->index in the tree.
4892  * @mas: The maple state.
4893  *
4894  * mas->index and mas->last will be set to the range if there is a value.  If
4895  * mas->status is ma_none, it is reset to ma_start.
4896  *
4897  * Return: the entry at the location or %NULL.
4898  */
4899 void *mas_walk(struct ma_state *mas)
4900 {
4901         void *entry;
4902 
4903         if (!mas_is_active(mas) || !mas_is_start(mas))
4904                 mas->status = ma_start;
4905 retry:
4906         entry = mas_state_walk(mas);
4907         if (mas_is_start(mas)) {
4908                 goto retry;
4909         } else if (mas_is_none(mas)) {
4910                 mas->index = 0;
4911                 mas->last = ULONG_MAX;
4912         } else if (mas_is_ptr(mas)) {
4913                 if (!mas->index) {
4914                         mas->last = 0;
4915                         return entry;
4916                 }
4917 
4918                 mas->index = 1;
4919                 mas->last = ULONG_MAX;
4920                 mas->status = ma_none;
4921                 return NULL;
4922         }
4923 
4924         return entry;
4925 }
4926 EXPORT_SYMBOL_GPL(mas_walk);
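
/*
 * Example (illustrative sketch, not part of the kernel sources): a point
 * lookup with mas_walk() under the RCU read lock.  The function name is
 * hypothetical; note the returned pointer is no longer protected once the
 * lock is dropped.
 *
 *	void *example_lookup(struct maple_tree *mt, unsigned long index)
 *	{
 *		MA_STATE(mas, mt, index, index);
 *		void *entry;
 *
 *		rcu_read_lock();
 *		entry = mas_walk(&mas);
 *		rcu_read_unlock();
 *		return entry;
 *	}
 */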
4927 
4928 static inline bool mas_rewind_node(struct ma_state *mas)
4929 {
4930         unsigned char slot;
4931 
4932         do {
4933                 if (mte_is_root(mas->node)) {
4934                         slot = mas->offset;
4935                         if (!slot)
4936                                 return false;
4937                 } else {
4938                         mas_ascend(mas);
4939                         slot = mas->offset;
4940                 }
4941         } while (!slot);
4942 
4943         mas->offset = --slot;
4944         return true;
4945 }
4946 
4947 /*
4948  * mas_skip_node() - Internal function.  Skip over a node.
4949  * @mas: The maple state.
4950  *
4951  * Return: true if there is another node, false otherwise.
4952  */
4953 static inline bool mas_skip_node(struct ma_state *mas)
4954 {
4955         if (mas_is_err(mas))
4956                 return false;
4957 
4958         do {
4959                 if (mte_is_root(mas->node)) {
4960                         if (mas->offset >= mas_data_end(mas)) {
4961                                 mas_set_err(mas, -EBUSY);
4962                                 return false;
4963                         }
4964                 } else {
4965                         mas_ascend(mas);
4966                 }
4967         } while (mas->offset >= mas_data_end(mas));
4968 
4969         mas->offset++;
4970         return true;
4971 }
4972 
4973 /*
4974  * mas_awalk() - Allocation walk.  Search from low address to high for a gap
4975  * of @size.
4976  * @mas: The maple state
4977  * @size: The size of the gap required
4978  *
4979  * Search between @mas->index and @mas->last for a gap of @size.
4980  */
4981 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
4982 {
4983         struct maple_enode *last = NULL;
4984 
4985         /*
4986          * There are 4 options:
4987          * go to child (descend)
4988          * go back to parent (ascend)
4989          * no gap found. (return, slot == MAPLE_NODE_SLOTS)
4990          * found the gap. (return, slot != MAPLE_NODE_SLOTS)
4991          */
4992         while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
4993                 if (last == mas->node)
4994                         mas_skip_node(mas);
4995                 else
4996                         last = mas->node;
4997         }
4998 }
4999 
5000 /*
5001  * mas_sparse_area() - Internal function.  Return upper or lower limit when
5002  * searching for a gap in an empty tree.
5003  * @mas: The maple state
5004  * @min: the minimum range
5005  * @max: The maximum range
5006  * @size: The size of the gap
5007  * @fwd: Searching forward or back
 *
 * Return: 0 on success, -EBUSY if a gap of @size does not fit.
5008  */
5009 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5010                                 unsigned long max, unsigned long size, bool fwd)
5011 {
5012         if (likely(!mas_is_none(mas)) && min == 0) {
5013                 min++;
5014                 /*
5015                  * min was just increased, so recheck whether a gap of
5016                  * the requested size still fits.
5017                  */
5018                 if (min > max || max - min + 1 < size)
5019                         return -EBUSY;
5020         }
5021         /* mas_is_ptr: index 0 is in use; min was moved past it above */
5022 
5023         if (fwd) {
5024                 mas->index = min;
5025                 mas->last = min + size - 1;
5026         } else {
5027                 mas->last = max;
5028                 mas->index = max - size + 1;
5029         }
5030         return 0;
5031 }
5032 
5033 /*
5034  * mas_empty_area() - Get the lowest address within the range that is
5035  * sufficient for the size requested.
5036  * @mas: The maple state
5037  * @min: The lowest value of the range
5038  * @max: The highest value of the range
5039  * @size: The size needed
 *
 * Return: 0 on success, -EBUSY if no gap is found, -EINVAL on invalid request.
5040  */
5041 int mas_empty_area(struct ma_state *mas, unsigned long min,
5042                 unsigned long max, unsigned long size)
5043 {
5044         unsigned char offset;
5045         unsigned long *pivots;
5046         enum maple_type mt;
5047         struct maple_node *node;
5048 
5049         if (min > max)
5050                 return -EINVAL;
5051 
5052         if (size == 0 || max - min < size - 1)
5053                 return -EINVAL;
5054 
5055         if (mas_is_start(mas))
5056                 mas_start(mas);
5057         else if (mas->offset >= 2)
5058                 mas->offset -= 2;
5059         else if (!mas_skip_node(mas))
5060                 return -EBUSY;
5061 
5062         /* Empty set */
5063         if (mas_is_none(mas) || mas_is_ptr(mas))
5064                 return mas_sparse_area(mas, min, max, size, true);
5065 
5066         /* The start of the window can only be within these values */
5067         mas->index = min;
5068         mas->last = max;
5069         mas_awalk(mas, size);
5070 
5071         if (unlikely(mas_is_err(mas)))
5072                 return xa_err(mas->node);
5073 
5074         offset = mas->offset;
5075         if (unlikely(offset == MAPLE_NODE_SLOTS))
5076                 return -EBUSY;
5077 
5078         node = mas_mn(mas);
5079         mt = mte_node_type(mas->node);
5080         pivots = ma_pivots(node, mt);
5081         min = mas_safe_min(mas, pivots, offset);
5082         if (mas->index < min)
5083                 mas->index = min;
5084         mas->last = mas->index + size - 1;
5085         mas->end = ma_data_end(node, mt, pivots, mas->max);
5086         return 0;
5087 }
5088 EXPORT_SYMBOL_GPL(mas_empty_area);
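
/*
 * Example (sketch): pairing mas_empty_area() with a store to claim the lowest
 * gap of 16 slots.  The function name is hypothetical; the tree is assumed to
 * use MT_FLAGS_ALLOC_RANGE so gaps are tracked.  A production caller would
 * retry the search if mas_store_gfp() dropped the lock to allocate.
 *
 *	int example_alloc_low(struct maple_tree *mt, void *ptr,
 *			      unsigned long *startp)
 *	{
 *		MA_STATE(mas, mt, 0, 0);
 *		int ret;
 *
 *		mtree_lock(mt);
 *		ret = mas_empty_area(&mas, 0, ULONG_MAX, 16);
 *		if (!ret) {
 *			*startp = mas.index;	// mas.index/mas.last span the gap
 *			ret = mas_store_gfp(&mas, ptr, GFP_KERNEL);
 *		}
 *		mtree_unlock(mt);
 *		return ret;
 *	}
 */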
5089 
5090 /*
5091  * mas_empty_area_rev() - Get the highest address within the range that is
5092  * sufficient for the size requested.
5093  * @mas: The maple state
5094  * @min: The lowest value of the range
5095  * @max: The highest value of the range
5096  * @size: The size needed
 *
 * Return: 0 on success, -EBUSY if no gap is found, -EINVAL on invalid request.
5097  */
5098 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5099                 unsigned long max, unsigned long size)
5100 {
5101         struct maple_enode *last = mas->node;
5102 
5103         if (min > max)
5104                 return -EINVAL;
5105 
5106         if (size == 0 || max - min < size - 1)
5107                 return -EINVAL;
5108 
5109         if (mas_is_start(mas))
5110                 mas_start(mas);
5111         else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
5112                 return -EBUSY;
5113 
5114         if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5115                 return mas_sparse_area(mas, min, max, size, false);
5116         else if (mas->offset >= 2)
5117                 mas->offset -= 2;
5118         else
5119                 mas->offset = mas_data_end(mas);
5120 
5122         /* The start of the window can only be within these values. */
5123         mas->index = min;
5124         mas->last = max;
5125 
5126         while (!mas_rev_awalk(mas, size, &min, &max)) {
5127                 if (last == mas->node) {
5128                         if (!mas_rewind_node(mas))
5129                                 return -EBUSY;
5130                 } else {
5131                         last = mas->node;
5132                 }
5133         }
5134 
5135         if (mas_is_err(mas))
5136                 return xa_err(mas->node);
5137 
5138         if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5139                 return -EBUSY;
5140 
5141         /* Trim the upper limit to the max. */
5142         if (max < mas->last)
5143                 mas->last = max;
5144 
5145         mas->index = mas->last - size + 1;
5146         mas->end = mas_data_end(mas);
5147         return 0;
5148 }
5149 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
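
/*
 * Example (sketch): the reverse search returns the highest fitting gap
 * instead, e.g. for top-down allocation.  Names are hypothetical.
 *
 *	int example_find_high_gap(struct maple_tree *mt, unsigned long size,
 *				  unsigned long *startp)
 *	{
 *		MA_STATE(mas, mt, 0, 0);
 *		int ret;
 *
 *		mtree_lock(mt);
 *		ret = mas_empty_area_rev(&mas, 0, ULONG_MAX, size);
 *		if (!ret)
 *			*startp = mas.index;	// gap is [mas.index, mas.last]
 *		mtree_unlock(mt);
 *		return ret;
 *	}
 */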
5150 
5151 /*
5152  * mte_dead_leaves() - Mark all leaves of a node as dead.
5153  * @enode: The encoded maple node
5154  * @mt: The maple tree
5155  * @slots: Pointer to the slot array
5156  *
5157  * Must hold the write lock.
5158  *
5159  * Return: The number of leaves marked as dead.
5160  */
5161 static inline
5162 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5163                               void __rcu **slots)
5164 {
5165         struct maple_node *node;
5166         enum maple_type type;
5167         void *entry;
5168         int offset;
5169 
5170         for (offset = 0; offset < mt_slot_count(enode); offset++) {
5171                 entry = mt_slot(mt, slots, offset);
5172                 type = mte_node_type(entry);
5173                 node = mte_to_node(entry);
5174                 /* Use both node and type to catch LE & BE metadata */
5175                 if (!node || !type)
5176                         break;
5177 
5178                 mte_set_node_dead(entry);
5179                 node->type = type;
5180                 rcu_assign_pointer(slots[offset], node);
5181         }
5182 
5183         return offset;
5184 }
5185 
5186 /**
5187  * mte_dead_walk() - Walk down a dead tree to just before the leaves
5188  * @enode: The maple encoded node
5189  * @offset: The starting offset
5190  *
5191  * Note: This can only be used from the RCU callback context.
5192  */
5193 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5194 {
5195         struct maple_node *node, *next;
5196         void __rcu **slots = NULL;
5197 
5198         next = mte_to_node(*enode);
5199         do {
5200                 *enode = ma_enode_ptr(next);
5201                 node = mte_to_node(*enode);
5202                 slots = ma_slots(node, node->type);
5203                 next = rcu_dereference_protected(slots[offset],
5204                                         lock_is_held(&rcu_callback_map));
5205                 offset = 0;
5206         } while (!ma_is_leaf(next->type));
5207 
5208         return slots;
5209 }
5210 
5211 /**
5212  * mt_free_walk() - Walk & free a tree in the RCU callback context
5213  * @head: The RCU head that's within the node.
5214  *
5215  * Note: This can only be used from the RCU callback context.
5216  */
5217 static void mt_free_walk(struct rcu_head *head)
5218 {
5219         void __rcu **slots;
5220         struct maple_node *node, *start;
5221         struct maple_enode *enode;
5222         unsigned char offset;
5223         enum maple_type type;
5224 
5225         node = container_of(head, struct maple_node, rcu);
5226 
5227         if (ma_is_leaf(node->type))
5228                 goto free_leaf;
5229 
5230         start = node;
5231         enode = mt_mk_node(node, node->type);
5232         slots = mte_dead_walk(&enode, 0);
5233         node = mte_to_node(enode);
5234         do {
5235                 mt_free_bulk(node->slot_len, slots);
5236                 offset = node->parent_slot + 1;
5237                 enode = node->piv_parent;
5238                 if (mte_to_node(enode) == node)
5239                         goto free_leaf;
5240 
5241                 type = mte_node_type(enode);
5242                 slots = ma_slots(mte_to_node(enode), type);
5243                 if ((offset < mt_slots[type]) &&
5244                     rcu_dereference_protected(slots[offset],
5245                                               lock_is_held(&rcu_callback_map)))
5246                         slots = mte_dead_walk(&enode, offset);
5247                 node = mte_to_node(enode);
5248         } while ((node != start) || (node->slot_len < offset));
5249 
5250         slots = ma_slots(node, node->type);
5251         mt_free_bulk(node->slot_len, slots);
5252 
5253 free_leaf:
5254         mt_free_rcu(&node->rcu);
5255 }
5256 
5257 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5258         struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5259 {
5260         struct maple_node *node;
5261         struct maple_enode *next = *enode;
5262         void __rcu **slots = NULL;
5263         enum maple_type type;
5264         unsigned char next_offset = 0;
5265 
5266         do {
5267                 *enode = next;
5268                 node = mte_to_node(*enode);
5269                 type = mte_node_type(*enode);
5270                 slots = ma_slots(node, type);
5271                 next = mt_slot_locked(mt, slots, next_offset);
5272                 if ((mte_dead_node(next)))
5273                         next = mt_slot_locked(mt, slots, ++next_offset);
5274 
5275                 mte_set_node_dead(*enode);
5276                 node->type = type;
5277                 node->piv_parent = prev;
5278                 node->parent_slot = offset;
5279                 offset = next_offset;
5280                 next_offset = 0;
5281                 prev = *enode;
5282         } while (!mte_is_leaf(next));
5283 
5284         return slots;
5285 }
5286 
5287 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5288                             bool free)
5289 {
5290         void __rcu **slots;
5291         struct maple_node *node = mte_to_node(enode);
5292         struct maple_enode *start;
5293 
5294         if (mte_is_leaf(enode)) {
5295                 node->type = mte_node_type(enode);
5296                 goto free_leaf;
5297         }
5298 
5299         start = enode;
5300         slots = mte_destroy_descend(&enode, mt, start, 0);
5301         node = mte_to_node(enode); /* Updated in the above call */
5302         do {
5303                 enum maple_type type;
5304                 unsigned char offset;
5305                 struct maple_enode *parent, *tmp;
5306 
5307                 node->slot_len = mte_dead_leaves(enode, mt, slots);
5308                 if (free)
5309                         mt_free_bulk(node->slot_len, slots);
5310                 offset = node->parent_slot + 1;
5311                 enode = node->piv_parent;
5312                 if (mte_to_node(enode) == node)
5313                         goto free_leaf;
5314 
5315                 type = mte_node_type(enode);
5316                 slots = ma_slots(mte_to_node(enode), type);
5317                 if (offset >= mt_slots[type])
5318                         goto next;
5319 
5320                 tmp = mt_slot_locked(mt, slots, offset);
5321                 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5322                         parent = enode;
5323                         enode = tmp;
5324                         slots = mte_destroy_descend(&enode, mt, parent, offset);
5325                 }
5326 next:
5327                 node = mte_to_node(enode);
5328         } while (start != enode);
5329 
5330         node = mte_to_node(enode);
5331         node->slot_len = mte_dead_leaves(enode, mt, slots);
5332         if (free)
5333                 mt_free_bulk(node->slot_len, slots);
5334 
5335 free_leaf:
5336         if (free)
5337                 mt_free_rcu(&node->rcu);
5338         else
5339                 mt_clear_meta(mt, node, node->type);
5340 }
5341 
5342 /*
5343  * mte_destroy_walk() - Free a tree or sub-tree.
5344  * @enode: the encoded maple node (maple_enode) to start
5345  * @mt: the tree to free - needed for node types.
5346  *
5347  * Must hold the write lock.
5348  */
5349 static inline void mte_destroy_walk(struct maple_enode *enode,
5350                                     struct maple_tree *mt)
5351 {
5352         struct maple_node *node = mte_to_node(enode);
5353 
5354         if (mt_in_rcu(mt)) {
5355                 mt_destroy_walk(enode, mt, false);
5356                 call_rcu(&node->rcu, mt_free_walk);
5357         } else {
5358                 mt_destroy_walk(enode, mt, true);
5359         }
5360 }
5361 
5362 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5363 {
5364         if (!mas_is_active(wr_mas->mas)) {
5365                 if (mas_is_start(wr_mas->mas))
5366                         return;
5367 
5368                 if (unlikely(mas_is_paused(wr_mas->mas)))
5369                         goto reset;
5370 
5371                 if (unlikely(mas_is_none(wr_mas->mas)))
5372                         goto reset;
5373 
5374                 if (unlikely(mas_is_overflow(wr_mas->mas)))
5375                         goto reset;
5376 
5377                 if (unlikely(mas_is_underflow(wr_mas->mas)))
5378                         goto reset;
5379         }
5380 
5381         /*
5382          * A less strict version of mas_is_span_wr() where we allow spanning
5383          * writes within this node.  This is to stop partial walks in
5384          * mas_preallocate() from being reset.
5385          */
5386         if (wr_mas->mas->last > wr_mas->mas->max)
5387                 goto reset;
5388 
5389         if (wr_mas->entry)
5390                 return;
5391 
5392         if (mte_is_leaf(wr_mas->mas->node) &&
5393             wr_mas->mas->last == wr_mas->mas->max)
5394                 goto reset;
5395 
5396         return;
5397 
5398 reset:
5399         mas_reset(wr_mas->mas);
5400 }
5401 
5402 /* Interface */
5403 
5404 /**
5405  * mas_store() - Store an @entry.
5406  * @mas: The maple state.
5407  * @entry: The entry to store.
5408  *
5409  * @mas->index and @mas->last are used to set the range for the @entry.
5410  * Note: The @mas should have pre-allocated entries to ensure there is memory to
5411  * store the entry.  Please see mas_expected_entries()/mas_destroy() for more details.
5412  *
5413  * Return: the first entry between mas->index and mas->last or %NULL.
5414  */
5415 void *mas_store(struct ma_state *mas, void *entry)
5416 {
5417         MA_WR_STATE(wr_mas, mas, entry);
5418 
5419         trace_ma_write(__func__, mas, 0, entry);
5420 #ifdef CONFIG_DEBUG_MAPLE_TREE
5421         if (MAS_WARN_ON(mas, mas->index > mas->last))
5422                 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5423 
5424         if (mas->index > mas->last) {
5425                 mas_set_err(mas, -EINVAL);
5426                 return NULL;
5427         }
5428 
5429 #endif
5430 
5431         /*
5432          * Storing is the same operation as insert with the added caveat that it
5433          * can overwrite entries.  Although this seems simple enough, one may
5434          * want to examine what happens if a single store operation was to
5435          * overwrite multiple entries within a self-balancing B-Tree.
5436          */
5437         mas_wr_store_setup(&wr_mas);
5438         mas_wr_store_entry(&wr_mas);
5439         return wr_mas.content;
5440 }
5441 EXPORT_SYMBOL_GPL(mas_store);
5442 
5443 /**
5444  * mas_store_gfp() - Store a value into the tree.
5445  * @mas: The maple state
5446  * @entry: The entry to store
5447  * @gfp: The GFP_FLAGS to use for allocations if necessary.
5448  *
5449  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5450  * be allocated.
5451  */
5452 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5453 {
5454         MA_WR_STATE(wr_mas, mas, entry);
5455 
5456         mas_wr_store_setup(&wr_mas);
5457         trace_ma_write(__func__, mas, 0, entry);
5458 retry:
5459         mas_wr_store_entry(&wr_mas);
5460         if (unlikely(mas_nomem(mas, gfp)))
5461                 goto retry;
5462 
5463         if (unlikely(mas_is_err(mas)))
5464                 return xa_err(mas->node);
5465 
5466         return 0;
5467 }
5468 EXPORT_SYMBOL_GPL(mas_store_gfp);
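
/*
 * Example (sketch): storing a value entry over the range 10-20 under the tree
 * lock.  The range and value are hypothetical; xa_mk_value() encodes a small
 * integer so it cannot be mistaken for an internal node pointer.
 *
 *	int example_store_range(struct maple_tree *mt)
 *	{
 *		MA_STATE(mas, mt, 10, 20);
 *		int ret;
 *
 *		mtree_lock(mt);
 *		ret = mas_store_gfp(&mas, xa_mk_value(42), GFP_KERNEL);
 *		mtree_unlock(mt);
 *		return ret;
 *	}
 */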
5469 
5470 /**
5471  * mas_store_prealloc() - Store a value into the tree using memory
5472  * preallocated in the maple state.
5473  * @mas: The maple state
5474  * @entry: The entry to store.
5475  */
5476 void mas_store_prealloc(struct ma_state *mas, void *entry)
5477 {
5478         MA_WR_STATE(wr_mas, mas, entry);
5479 
5480         mas_wr_store_setup(&wr_mas);
5481         trace_ma_write(__func__, mas, 0, entry);
5482         mas_wr_store_entry(&wr_mas);
5483         MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5484         mas_destroy(mas);
5485 }
5486 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5487 
5488 /**
5489  * mas_preallocate() - Preallocate enough nodes for a store operation
5490  * @mas: The maple state
5491  * @entry: The entry that will be stored
5492  * @gfp: The GFP_FLAGS to use for allocations.
5493  *
5494  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5495  */
5496 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5497 {
5498         MA_WR_STATE(wr_mas, mas, entry);
5499         unsigned char node_size;
5500         int request = 1;
5501         int ret;
5502 
5504         if (unlikely(!mas->index && mas->last == ULONG_MAX))
5505                 goto ask_now;
5506 
5507         mas_wr_store_setup(&wr_mas);
5508         wr_mas.content = mas_start(mas);
5509         /* Root expand */
5510         if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5511                 goto ask_now;
5512 
5513         if (unlikely(!mas_wr_walk(&wr_mas))) {
5514                 /* Spanning store, use worst case for now */
5515                 request = 1 + mas_mt_height(mas) * 3;
5516                 goto ask_now;
5517         }
5518 
5519         /* At this point, we are at the leaf node that needs to be altered. */
5520         /* Exact fit, no nodes needed. */
5521         if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
5522                 return 0;
5523 
5524         mas_wr_end_piv(&wr_mas);
5525         node_size = mas_wr_new_end(&wr_mas);
5526 
5527         /* Slot store, does not require additional nodes */
5528         if (node_size == mas->end) {
5529                 /* reuse node */
5530                 if (!mt_in_rcu(mas->tree))
5531                         return 0;
5532                 /* shifting boundary */
5533                 if (wr_mas.offset_end - mas->offset == 1)
5534                         return 0;
5535         }
5536 
5537         if (node_size >= mt_slots[wr_mas.type]) {
5538                 /* Split, worst case for now. */
5539                 request = 1 + mas_mt_height(mas) * 2;
5540                 goto ask_now;
5541         }
5542 
5543         /* New root needs a single node */
5544         if (unlikely(mte_is_root(mas->node)))
5545                 goto ask_now;
5546 
5547         /* Potential spanning rebalance collapsing a node, use worst-case */
5548         if (node_size - 1 <= mt_min_slots[wr_mas.type])
5549                 request = mas_mt_height(mas) * 2 - 1;
5550 
5551         /* node store, slot store needs one node */
5552 ask_now:
5553         mas_node_count_gfp(mas, request, gfp);
5554         mas->mas_flags |= MA_STATE_PREALLOC;
5555         if (likely(!mas_is_err(mas)))
5556                 return 0;
5557 
5558         mas_set_alloc_req(mas, 0);
5559         ret = xa_err(mas->node);
5560         mas_reset(mas);
5561         mas_destroy(mas);
5562         mas_reset(mas);
5563         return ret;
5564 }
5565 EXPORT_SYMBOL_GPL(mas_preallocate);
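
/*
 * Example (sketch): the preallocation pattern for callers that cannot
 * tolerate allocation failure at store time.  The helper name is
 * hypothetical.  If the store is abandoned after a successful
 * mas_preallocate(), mas_destroy() must be called to return the nodes.
 *
 *	int example_prealloc_store(struct ma_state *mas, void *entry)
 *	{
 *		if (mas_preallocate(mas, entry, GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		mas_store_prealloc(mas, entry);	// cannot fail; frees the nodes
 *		return 0;
 *	}
 */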
5566 
5567 /*
5568  * mas_destroy() - destroy a maple state.
5569  * @mas: The maple state
5570  *
5571  * Upon completion, check the left-most node and rebalance against the node to
5572  * the right if necessary.  Frees any allocated nodes associated with this maple
5573  * state.
5574  */
5575 void mas_destroy(struct ma_state *mas)
5576 {
5577         struct maple_alloc *node;
5578         unsigned long total;
5579 
5580         /*
5581          * When using mas_for_each() to insert an expected number of elements,
5582          * it is possible that the number inserted is less than the expected
5583          * number.  To fix an invalid final node, a check is performed here to
5584          * rebalance the previous node with the final node.
5585          */
5586         if (mas->mas_flags & MA_STATE_REBALANCE) {
5587                 unsigned char end;
5588 
5589                 mas_start(mas);
5590                 mtree_range_walk(mas);
5591                 end = mas->end + 1;
5592                 if (end < mt_min_slot_count(mas->node) - 1)
5593                         mas_destroy_rebalance(mas, end);
5594 
5595                 mas->mas_flags &= ~MA_STATE_REBALANCE;
5596         }
5597         mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5598 
5599         total = mas_allocated(mas);
5600         while (total) {
5601                 node = mas->alloc;
5602                 mas->alloc = node->slot[0];
5603                 if (node->node_count > 1) {
5604                         size_t count = node->node_count - 1;
5605 
5606                         mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5607                         total -= count;
5608                 }
5609                 mt_free_one(ma_mnode_ptr(node));
5610                 total--;
5611         }
5612 
5613         mas->alloc = NULL;
5614 }
5615 EXPORT_SYMBOL_GPL(mas_destroy);
5616 
5617 /*
5618  * mas_expected_entries() - Set the expected number of entries that will be inserted.
5619  * @mas: The maple state
5620  * @nr_entries: The number of expected entries.
5621  *
5622  * This will attempt to pre-allocate enough nodes to store the expected number
5623  * of entries.  The allocations will occur using the bulk allocator interface
5624  * for speed.  Please call mas_destroy() on the @mas after inserting the entries
5625  * to ensure any unused nodes are freed.
5626  *
5627  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5628  */
5629 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5630 {
5631         int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5632         struct maple_enode *enode = mas->node;
5633         int nr_nodes;
5634         int ret;
5635 
5636         /*
5637          * Sometimes it is necessary to duplicate a tree to a new tree, such as
5638          * forking a process and duplicating the VMAs from one tree to a new
5639          * tree.  When such a situation arises, it is known that the new tree is
5640          * not going to be used until the entire tree is populated.  For
5641          * performance reasons, it is best to use a bulk load with RCU disabled.
5642          * This allows for optimistic splitting that favours the left and reuse
5643          * of nodes during the operation.
5644          */
5645 
5646         /* Optimize splitting for bulk insert in-order */
5647         mas->mas_flags |= MA_STATE_BULK;
5648 
5649         /*
5650          * Avoid overflow, assume a gap between each entry and a trailing null.
5651          * If this is wrong, it just means allocation can happen during
5652          * insertion of entries.
5653          */
5654         nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5655         if (!mt_is_alloc(mas->tree))
5656                 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5657 
5658         /* Leaves; reduce slots to keep space for expansion */
5659         nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5660         /* Internal nodes */
5661         nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5662         /* Add working room for split (2 nodes) + new parents */
5663         mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
5664 
5665         /* Detect if allocations run out */
5666         mas->mas_flags |= MA_STATE_PREALLOC;
5667 
5668         if (!mas_is_err(mas))
5669                 return 0;
5670 
5671         ret = xa_err(mas->node);
5672         mas->node = enode;
5673         mas_destroy(mas);
5674         return ret;
5676 }
5677 EXPORT_SYMBOL_GPL(mas_expected_entries);
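
/*
 * Example (sketch): bulk-loading a new, unshared tree as described above.
 * The ranges and array are hypothetical; mas_destroy() both frees unused
 * nodes and rebalances a final node left under-filled by the bulk-split
 * optimization.
 *
 *	int example_bulk_fill(struct maple_tree *mt, void **entries,
 *			      unsigned long nr)
 *	{
 *		MA_STATE(mas, mt, 0, 0);
 *		unsigned long i;
 *		int ret;
 *
 *		mtree_lock(mt);
 *		ret = mas_expected_entries(&mas, nr);
 *		if (ret)
 *			goto unlock;
 *
 *		for (i = 0; i < nr; i++) {
 *			mas_set_range(&mas, i * 10, i * 10 + 5);
 *			mas_store_gfp(&mas, entries[i], GFP_KERNEL);
 *		}
 *	unlock:
 *		mas_destroy(&mas);
 *		mtree_unlock(mt);
 *		return ret;
 *	}
 */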
5678 
5679 static bool mas_next_setup(struct ma_state *mas, unsigned long max,
5680                 void **entry)
5681 {
5682         bool was_none = mas_is_none(mas);
5683 
5684         if (unlikely(mas->last >= max)) {
5685                 mas->status = ma_overflow;
5686                 return true;
5687         }
5688 
5689         switch (mas->status) {
5690         case ma_active:
5691                 return false;
5692         case ma_none:
5693                 fallthrough;
5694         case ma_pause:
5695                 mas->status = ma_start;
5696                 fallthrough;
5697         case ma_start:
5698                 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5699                 break;
5700         case ma_overflow:
5701                 /* Overflowed before, but the max changed */
5702                 mas->status = ma_active;
5703                 break;
5704         case ma_underflow:
5705                 /* The user expects the mas to be one before where it is */
5706                 mas->status = ma_active;
5707                 *entry = mas_walk(mas);
5708                 if (*entry)
5709                         return true;
5710                 break;
5711         case ma_root:
5712                 break;
5713         case ma_error:
5714                 return true;
5715         }
5716 
5717         if (likely(mas_is_active(mas))) /* Fast path */
5718                 return false;
5719 
5720         if (mas_is_ptr(mas)) {
5721                 *entry = NULL;
5722                 if (was_none && mas->index == 0) {
5723                         mas->index = mas->last = 0;
5724                         return true;
5725                 }
5726                 mas->index = 1;
5727                 mas->last = ULONG_MAX;
5728                 mas->status = ma_none;
5729                 return true;
5730         }
5731 
5732         if (mas_is_none(mas))
5733                 return true;
5734 
5735         return false;
5736 }
5737 
5738 /**
5739  * mas_next() - Get the next entry.
5740  * @mas: The maple state
5741  * @max: The maximum index to check.
5742  *
5743  * Returns the next entry after @mas->index.
5744  * Must hold rcu_read_lock or the write lock.
5745  * Can return the zero entry.
5746  *
5747  * Return: The next entry or %NULL
5748  */
5749 void *mas_next(struct ma_state *mas, unsigned long max)
5750 {
5751         void *entry = NULL;
5752 
5753         if (mas_next_setup(mas, max, &entry))
5754                 return entry;
5755 
5756         /* Retries on dead nodes handled by mas_next_slot */
5757         return mas_next_slot(mas, max, false);
5758 }
5759 EXPORT_SYMBOL_GPL(mas_next);
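
/*
 * Example (sketch): stepping through all entries with mas_next() under RCU.
 * Because mas_next() returns the entry after the current position, an entry
 * at index 0 itself would be skipped here; mas_find() covers the at-or-after
 * case.  The function name is hypothetical.
 *
 *	void example_dump(struct maple_tree *mt)
 *	{
 *		MA_STATE(mas, mt, 0, 0);
 *		void *entry;
 *
 *		rcu_read_lock();
 *		while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *			pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *		rcu_read_unlock();
 *	}
 */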
5760 
5761 /**
5762  * mas_next_range() - Advance the maple state to the next range
5763  * @mas: The maple state
5764  * @max: The maximum index to check.
5765  *
5766  * Sets @mas->index and @mas->last to the range.
5767  * Must hold rcu_read_lock or the write lock.
5768  * Can return the zero entry.
5769  *
5770  * Return: The next entry or %NULL
5771  */
5772 void *mas_next_range(struct ma_state *mas, unsigned long max)
5773 {
5774         void *entry = NULL;
5775 
5776         if (mas_next_setup(mas, max, &entry))
5777                 return entry;
5778 
5779         /* Retries on dead nodes handled by mas_next_slot */
5780         return mas_next_slot(mas, max, true);
5781 }
5782 EXPORT_SYMBOL_GPL(mas_next_range);
5783 
5784 /**
5785  * mt_next() - get the next value in the maple tree
5786  * @mt: The maple tree
5787  * @index: The start index
5788  * @max: The maximum index to check
5789  *
5790  * Takes RCU read lock internally to protect the search, which does not
5791  * protect the returned pointer after dropping RCU read lock.
5792  * See also: Documentation/core-api/maple_tree.rst
5793  *
5794  * Return: The entry higher than @index or %NULL if nothing is found.
5795  */
5796 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5797 {
5798         void *entry = NULL;
5799         MA_STATE(mas, mt, index, index);
5800 
5801         rcu_read_lock();
5802         entry = mas_next(&mas, max);
5803         rcu_read_unlock();
5804         return entry;
5805 }
5806 EXPORT_SYMBOL_GPL(mt_next);
5807 
5808 static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
5809 {
5810         if (unlikely(mas->index <= min)) {
5811                 mas->status = ma_underflow;
5812                 return true;
5813         }
5814 
5815         switch (mas->status) {
5816         case ma_active:
5817                 return false;
5818         case ma_start:
5819                 break;
5820         case ma_none:
5821                 fallthrough;
5822         case ma_pause:
5823                 mas->status = ma_start;
5824                 break;
5825         case ma_underflow:
5826                 /* underflowed before but the min changed */
5827                 mas->status = ma_active;
5828                 break;
5829         case ma_overflow:
5830                 /* User expects mas to be one after where it is */
5831                 mas->status = ma_active;
5832                 *entry = mas_walk(mas);
5833                 if (*entry)
5834                         return true;
5835                 break;
5836         case ma_root:
5837                 break;
5838         case ma_error:
5839                 return true;
5840         }
5841 
5842         if (mas_is_start(mas))
5843                 mas_walk(mas);
5844 
5845         if (unlikely(mas_is_ptr(mas))) {
5846                 if (!mas->index) {
5847                         mas->status = ma_none;
5848                         return true;
5849                 }
5850                 mas->index = mas->last = 0;
5851                 *entry = mas_root(mas);
5852                 return true;
5853         }
5854 
5855         if (mas_is_none(mas)) {
5856                 if (mas->index) {
5857                         /* Walked to out-of-range pointer? */
5858                         mas->index = mas->last = 0;
5859                         mas->status = ma_root;
5860                         *entry = mas_root(mas);
5861                         return true;
5862                 }
5863                 return true;
5864         }
5865 
5866         return false;
5867 }
5868 
5869 /**
5870  * mas_prev() - Get the previous entry
5871  * @mas: The maple state
5872  * @min: The minimum value to check.
5873  *
5874  * Must hold rcu_read_lock or the write lock.
5875  * Will reset mas to ma_start if the status is ma_none.  Will stop on nodes
5876  * that are not searchable.
5877  *
5878  * Return: the previous value or %NULL.
5879  */
5880 void *mas_prev(struct ma_state *mas, unsigned long min)
5881 {
5882         void *entry = NULL;
5883 
5884         if (mas_prev_setup(mas, min, &entry))
5885                 return entry;
5886 
5887         return mas_prev_slot(mas, min, false);
5888 }
5889 EXPORT_SYMBOL_GPL(mas_prev);
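
/*
 * Example (sketch): walking backwards from the top of the range.  As with
 * mas_next(), the entry at the starting index itself is not returned; use
 * mas_find_rev() for the at-or-below case.  The name is hypothetical.
 *
 *	void example_dump_rev(struct maple_tree *mt)
 *	{
 *		MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
 *		void *entry;
 *
 *		rcu_read_lock();
 *		while ((entry = mas_prev(&mas, 0)) != NULL)
 *			pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *		rcu_read_unlock();
 *	}
 */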
5890 
5891 /**
5892  * mas_prev_range() - Advance to the previous range
5893  * @mas: The maple state
5894  * @min: The minimum value to check.
5895  *
5896  * Sets @mas->index and @mas->last to the range.
5897  * Must hold rcu_read_lock or the write lock.
5898  * Will reset mas to ma_start if the status is ma_none.  Will stop on nodes
5899  * that are not searchable.
5900  *
5901  * Return: the previous value or %NULL.
5902  */
5903 void *mas_prev_range(struct ma_state *mas, unsigned long min)
5904 {
5905         void *entry = NULL;
5906 
5907         if (mas_prev_setup(mas, min, &entry))
5908                 return entry;
5909 
5910         return mas_prev_slot(mas, min, true);
5911 }
5912 EXPORT_SYMBOL_GPL(mas_prev_range);
5913 
5914 /**
5915  * mt_prev() - get the previous value in the maple tree
5916  * @mt: The maple tree
5917  * @index: The start index
5918  * @min: The minimum index to check
5919  *
5920  * Takes RCU read lock internally to protect the search, which does not
5921  * protect the returned pointer after dropping RCU read lock.
5922  * See also: Documentation/core-api/maple_tree.rst
5923  *
5924  * Return: The entry before @index or %NULL if nothing is found.
5925  */
5926 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5927 {
5928         void *entry = NULL;
5929         MA_STATE(mas, mt, index, index);
5930 
5931         rcu_read_lock();
5932         entry = mas_prev(&mas, min);
5933         rcu_read_unlock();
5934         return entry;
5935 }
5936 EXPORT_SYMBOL_GPL(mt_prev);
5937 
5938 /**
5939  * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5940  * @mas: The maple state to pause
5941  *
5942  * Some users need to pause a walk and drop the lock they're holding in
5943  * order to yield to a higher priority thread or carry out an operation
5944  * on an entry.  Those users should call this function before they drop
5945  * the lock.  It resets the @mas to be suitable for the next iteration
5946  * of the loop after the user has reacquired the lock.  If most entries
5947  * found during a walk require you to call mas_pause(), the mt_for_each()
5948  * iterator may be more appropriate.
5950  */
5951 void mas_pause(struct ma_state *mas)
5952 {
5953         mas->status = ma_pause;
5954         mas->node = NULL;
5955 }
5956 EXPORT_SYMBOL_GPL(mas_pause);
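
/*
 * Example (sketch): the pause pattern described above, dropping the RCU read
 * lock mid-iteration to reschedule.  The function name is hypothetical.
 *
 *	void example_walk_resched(struct maple_tree *mt)
 *	{
 *		MA_STATE(mas, mt, 0, 0);
 *		void *entry;
 *
 *		rcu_read_lock();
 *		mas_for_each(&mas, entry, ULONG_MAX) {
 *			if (need_resched()) {
 *				mas_pause(&mas);
 *				rcu_read_unlock();
 *				cond_resched();
 *				rcu_read_lock();
 *			}
 *		}
 *		rcu_read_unlock();
 *	}
 */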
5957 
5958 /**
5959  * mas_find_setup() - Internal function to set up mas_find*().
5960  * @mas: The maple state
5961  * @max: The maximum index
5962  * @entry: Pointer to the entry
5963  *
5964  * Return: True if entry is the answer, false otherwise.
5965  */
5966 static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
5967 {
5968         switch (mas->status) {
5969         case ma_active:
5970                 if (mas->last < max)
5971                         return false;
5972                 return true;
5973         case ma_start:
5974                 break;
5975         case ma_pause:
5976                 if (unlikely(mas->last >= max))
5977                         return true;
5978 
5979                 mas->index = ++mas->last;
5980                 mas->status = ma_start;
5981                 break;
5982         case ma_none:
5983                 if (unlikely(mas->last >= max))
5984                         return true;
5985 
5986                 mas->index = mas->last;
5987                 mas->status = ma_start;
5988                 break;
5989         case ma_underflow:
5990                 /* mas points at the previous entry; it was unable to go lower */
5991                 if (unlikely(mas->index >= max)) {
5992                         mas->status = ma_overflow;
5993                         return true;
5994                 }
5995 
5996                 mas->status = ma_active;
5997                 *entry = mas_walk(mas);
5998                 if (*entry)
5999                         return true;
6000                 break;
6001         case ma_overflow:
6002                 if (unlikely(mas->last >= max))
6003                         return true;
6004 
6005                 mas->status = ma_active;
6006                 *entry = mas_walk(mas);
6007                 if (*entry)
6008                         return true;
6009                 break;
6010         case ma_root:
6011                 break;
6012         case ma_error:
6013                 return true;
6014         }
6015 
6016         if (mas_is_start(mas)) {
6017                 /* First run or continue */
6018                 if (mas->index > max)
6019                         return true;
6020 
6021                 *entry = mas_walk(mas);
6022                 if (*entry)
6023                         return true;
6025         }
6026 
6027         if (unlikely(mas_is_ptr(mas)))
6028                 goto ptr_out_of_range;
6029 
6030         if (unlikely(mas_is_none(mas)))
6031                 return true;
6032 
6033         if (mas->index == max)
6034                 return true;
6035 
6036         return false;
6037 
6038 ptr_out_of_range:
6039         mas->status = ma_none;
6040         mas->index = 1;
6041         mas->last = ULONG_MAX;
6042         return true;
6043 }
6044 
6045 /**
6046  * mas_find() - On the first call, find the entry at or after mas->index up to
6047  * %max.  Otherwise, find the entry after mas->index.
6048  * @mas: The maple state
6049  * @max: The maximum value to check.
6050  *
6051  * Must hold rcu_read_lock or the write lock.
6052  * If an entry exists, last and index are updated accordingly.
6053  * May set @mas->status to ma_overflow.
6054  *
6055  * Return: The entry or %NULL.
6056  */
6057 void *mas_find(struct ma_state *mas, unsigned long max)
6058 {
6059         void *entry = NULL;
6060 
6061         if (mas_find_setup(mas, max, &entry))
6062                 return entry;
6063 
6064         /* Retries on dead nodes handled by mas_next_slot */
6065         entry = mas_next_slot(mas, max, false);
6066         /* Ignore overflow */
6067         mas->status = ma_active;
6068         return entry;
6069 }
6070 EXPORT_SYMBOL_GPL(mas_find);
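
/*
 * Example (sketch): mas_find() is the building block of the mas_for_each()
 * iterator; the first call finds at-or-after mas->index, so an entry at the
 * starting index is included.  Names are hypothetical.
 *
 *	void *example_first_in_range(struct maple_tree *mt, unsigned long first,
 *				     unsigned long last)
 *	{
 *		MA_STATE(mas, mt, first, first);
 *		void *entry;
 *
 *		rcu_read_lock();
 *		entry = mas_find(&mas, last);
 *		rcu_read_unlock();
 *		return entry;
 *	}
 */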
6071 
6072 /**
6073  * mas_find_range() - On the first call, find the entry at or after
6074  * mas->index up to %max.  Otherwise, advance to the next slot after mas->index.
6075  * @mas: The maple state
6076  * @max: The maximum value to check.
6077  *
6078  * Must hold rcu_read_lock or the write lock.
6079  * If an entry exists, last and index are updated accordingly.
6080  * May set @mas->status to ma_overflow.
6081  *
6082  * Return: The entry or %NULL.
6083  */
6084 void *mas_find_range(struct ma_state *mas, unsigned long max)
6085 {
6086         void *entry = NULL;
6087 
6088         if (mas_find_setup(mas, max, &entry))
6089                 return entry;
6090 
6091         /* Retries on dead nodes handled by mas_next_slot */
6092         return mas_next_slot(mas, max, true);
6093 }
6094 EXPORT_SYMBOL_GPL(mas_find_range);
6095 
6096 /**
6097  * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6098  * @mas: The maple state
6099  * @min: The minimum index
6100  * @entry: Pointer to the entry
6101  *
6102  * Return: True if entry is the answer, false otherwise.
6103  */
6104 static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
6105                 void **entry)
6106 {
6108         switch (mas->status) {
6109         case ma_active:
6110                 goto active;
6111         case ma_start:
6112                 break;
6113         case ma_pause:
6114                 if (unlikely(mas->index <= min)) {
6115                         mas->status = ma_underflow;
6116                         return true;
6117                 }
6118                 mas->last = --mas->index;
6119                 mas->status = ma_start;
6120                 break;
6121         case ma_none:
6122                 if (mas->index <= min)
6123                         goto none;
6124 
6125                 mas->last = mas->index;
6126                 mas->status = ma_start;
6127                 break;
6128         case ma_overflow: /* user expects the mas to be one after where it is */
6129                 if (unlikely(mas->index <= min)) {
6130                         mas->status = ma_underflow;
6131                         return true;
6132                 }
6133 
6134                 mas->status = ma_active;
6135                 break;
6136         case ma_underflow: /* user expects the mas to be one before where it is */
6137                 if (unlikely(mas->index <= min))
6138                         return true;
6139 
6140                 mas->status = ma_active;
6141                 break;
6142         case ma_root:
6143                 break;
6144         case ma_error:
6145                 return true;
6146         }
6147 
6148         if (mas_is_start(mas)) {
6149                 /* First run or continue */
6150                 if (mas->index < min)
6151                         return true;
6152 
6153                 *entry = mas_walk(mas);
6154                 if (*entry)
6155                         return true;
6156         }
6157 
6158         if (unlikely(mas_is_ptr(mas)))
6159                 goto none;
6160 
6161         if (unlikely(mas_is_none(mas))) {
6162                 /*
6163                  * Walked to the location, and there was nothing so the previous
6164                  * location is 0.
6165                  */
6166                 mas->last = mas->index = 0;
6167                 mas->status = ma_root;
6168                 *entry = mas_root(mas);
6169                 return true;
6170         }
6171 
6172 active:
6173         if (mas->index < min)
6174                 return true;
6175 
6176         return false;
6177 
6178 none:
6179         mas->status = ma_none;
6180         return true;
6181 }
6182 
6183 /**
6184  * mas_find_rev() - On the first call, find the first non-null entry at or
6185  * below mas->index down to %min.  Otherwise, find the first non-null entry
6186  * below mas->index down to %min.
6187  * @mas: The maple state
6188  * @min: The minimum value to check.
6189  *
6190  * Must hold rcu_read_lock or the write lock.
6191  * If an entry exists, last and index are updated accordingly.
6192  * May set @mas->status to ma_underflow.
6193  *
6194  * Return: The entry or %NULL.
6195  */
6196 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6197 {
6198         void *entry = NULL;
6199 
6200         if (mas_find_rev_setup(mas, min, &entry))
6201                 return entry;
6202 
6203         /* Retries on dead nodes handled by mas_prev_slot */
6204         return mas_prev_slot(mas, min, false);
6205 
6206 }
6207 EXPORT_SYMBOL_GPL(mas_find_rev);
6208 
6209 /**
6210  * mas_find_range_rev() - On the first call, find the first non-null entry
6211  * at or below mas->index down to %min.  Otherwise, advance to the previous
6212  * slot, below mas->index, down to %min.
6213  * @mas: The maple state
6214  * @min: The minimum value to check.
6215  *
6216  * Must hold rcu_read_lock or the write lock.
6217  * If an entry exists, last and index are updated accordingly.
6218  * May set @mas->status to ma_underflow.
6219  *
6220  * Return: The entry or %NULL.
6221  */
6222 void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
6223 {
6224         void *entry = NULL;
6225 
6226         if (mas_find_rev_setup(mas, min, &entry))
6227                 return entry;
6228 
6229         /* Retries on dead nodes handled by mas_prev_slot */
6230         return mas_prev_slot(mas, min, true);
6231 }
6232 EXPORT_SYMBOL_GPL(mas_find_range_rev);
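
/*
 * Editorial sketch (not part of the original source): walking all entries in
 * reverse with mas_find_rev(), starting from the top of the index space.
 * The function name is hypothetical.
 */
static void __maybe_unused example_mas_find_rev_iter(struct maple_tree *mt)
{
	MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);	/* search downwards */
	void *entry;

	rcu_read_lock();
	while ((entry = mas_find_rev(&mas, 0)) != NULL)
		pr_debug("entry %p ends at %lx\n", entry, mas.last);
	rcu_read_unlock();
}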
6233 
6234 /**
6235  * mas_erase() - Find the range in which index resides and erase the entire
6236  * range.
6237  * @mas: The maple state
6238  *
6239  * Must hold the write lock.
6240  * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6241  * erases that range.
6242  *
6243  * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6244  */
6245 void *mas_erase(struct ma_state *mas)
6246 {
6247         void *entry;
6248         MA_WR_STATE(wr_mas, mas, NULL);
6249 
6250         if (!mas_is_active(mas) && !mas_is_start(mas))
6251                 mas->status = ma_start;
6252 
6253         /* Retry unnecessary when holding the write lock. */
6254         entry = mas_state_walk(mas);
6255         if (!entry)
6256                 return NULL;
6257 
6258 write_retry:
6259         /* Must reset to ensure spanning writes of last slot are detected */
6260         mas_reset(mas);
6261         mas_wr_store_setup(&wr_mas);
6262         mas_wr_store_entry(&wr_mas);
6263         if (mas_nomem(mas, GFP_KERNEL))
6264                 goto write_retry;
6265 
6266         return entry;
6267 }
6268 EXPORT_SYMBOL_GPL(mas_erase);
6269 
6270 /**
6271  * mas_nomem() - Check if there was an error allocating and do the allocation
6272  * if necessary.  If there are allocations, then free them.
6273  * @mas: The maple state
6274  * @gfp: The GFP_FLAGS to use for allocations
6275  * Return: true on allocation, false otherwise.
6276  */
6277 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6278         __must_hold(mas->tree->ma_lock)
6279 {
6280         if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6281                 mas_destroy(mas);
6282                 return false;
6283         }
6284 
6285         if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6286                 mtree_unlock(mas->tree);
6287                 mas_alloc_nodes(mas, gfp);
6288                 mtree_lock(mas->tree);
6289         } else {
6290                 mas_alloc_nodes(mas, gfp);
6291         }
6292 
6293         if (!mas_allocated(mas))
6294                 return false;
6295 
6296         mas->status = ma_start;
6297         return true;
6298 }
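
/*
 * Editorial sketch (not part of the original source): the canonical
 * mas_nomem() retry loop, essentially what mas_store_gfp() does internally.
 * The function name is hypothetical.
 */
static int __maybe_unused example_store_with_retry(struct ma_state *mas,
		void *entry)
{
	mas_lock(mas);
retry:
	mas_store(mas, entry);
	/*
	 * On -ENOMEM, mas_nomem() allocates the needed nodes (possibly
	 * dropping the lock to sleep) and returns true to request a retry.
	 */
	if (mas_nomem(mas, GFP_KERNEL))
		goto retry;

	mas_unlock(mas);
	return mas_is_err(mas) ? xa_err(mas->node) : 0;
}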
6299 
6300 void __init maple_tree_init(void)
6301 {
6302         maple_node_cache = kmem_cache_create("maple_node",
6303                         sizeof(struct maple_node), sizeof(struct maple_node),
6304                         SLAB_PANIC, NULL);
6305 }
6306 
6307 /**
6308  * mtree_load() - Load a value stored in a maple tree
6309  * @mt: The maple tree
6310  * @index: The index to load
6311  *
6312  * Return: the entry or %NULL
6313  */
6314 void *mtree_load(struct maple_tree *mt, unsigned long index)
6315 {
6316         MA_STATE(mas, mt, index, index);
6317         void *entry;
6318 
6319         trace_ma_read(__func__, &mas);
6320         rcu_read_lock();
6321 retry:
6322         entry = mas_start(&mas);
6323         if (unlikely(mas_is_none(&mas)))
6324                 goto unlock;
6325 
6326         if (unlikely(mas_is_ptr(&mas))) {
6327                 if (index)
6328                         entry = NULL;
6329 
6330                 goto unlock;
6331         }
6332 
6333         entry = mtree_lookup_walk(&mas);
6334         if (!entry && unlikely(mas_is_start(&mas)))
6335                 goto retry;
6336 unlock:
6337         rcu_read_unlock();
6338         if (xa_is_zero(entry))
6339                 return NULL;
6340 
6341         return entry;
6342 }
6343 EXPORT_SYMBOL(mtree_load);
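
/*
 * Editorial sketch (not part of the original source): a bare lookup.
 * mtree_load() takes the RCU read lock itself, so no locking is needed at
 * the call site, but the returned pointer is only safe to dereference if
 * the caller arranges its own protection.
 */
static void __maybe_unused example_mtree_load(struct maple_tree *mt)
{
	void *entry = mtree_load(mt, 5);	/* entry at index 5, or NULL */

	if (entry)
		pr_debug("index 5 holds %p\n", entry);
}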
6344 
6345 /**
6346  * mtree_store_range() - Store an entry at a given range.
6347  * @mt: The maple tree
6348  * @index: The start of the range
6349  * @last: The end of the range
6350  * @entry: The entry to store
6351  * @gfp: The GFP_FLAGS to use for allocations
6352  *
6353  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6354  * be allocated.
6355  */
6356 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6357                 unsigned long last, void *entry, gfp_t gfp)
6358 {
6359         MA_STATE(mas, mt, index, last);
6360         MA_WR_STATE(wr_mas, &mas, entry);
6361 
6362         trace_ma_write(__func__, &mas, 0, entry);
6363         if (WARN_ON_ONCE(xa_is_advanced(entry)))
6364                 return -EINVAL;
6365 
6366         if (index > last)
6367                 return -EINVAL;
6368 
6369         mtree_lock(mt);
6370 retry:
6371         mas_wr_store_entry(&wr_mas);
6372         if (mas_nomem(&mas, gfp))
6373                 goto retry;
6374 
6375         mtree_unlock(mt);
6376         if (mas_is_err(&mas))
6377                 return xa_err(mas.node);
6378 
6379         return 0;
6380 }
6381 EXPORT_SYMBOL(mtree_store_range);
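
/*
 * Editorial sketch (not part of the original source): storing one entry over
 * the range [10, 19] and reading it back from an index inside that range.
 * xa_mk_value() encodes a small integer as a non-reserved entry.
 */
static int __maybe_unused example_store_range(void)
{
	struct maple_tree tree;
	int ret;

	mt_init(&tree);
	ret = mtree_store_range(&tree, 10, 19, xa_mk_value(0xa), GFP_KERNEL);
	if (ret)
		return ret;

	/* Every index in [10, 19] maps to the same entry. */
	WARN_ON(mtree_load(&tree, 15) != xa_mk_value(0xa));

	mtree_destroy(&tree);
	return 0;
}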
6382 
6383 /**
6384  * mtree_store() - Store an entry at a given index.
6385  * @mt: The maple tree
6386  * @index: The index to store the value
6387  * @entry: The entry to store
6388  * @gfp: The GFP_FLAGS to use for allocations
6389  *
6390  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6391  * be allocated.
6392  */
6393 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6394                  gfp_t gfp)
6395 {
6396         return mtree_store_range(mt, index, index, entry, gfp);
6397 }
6398 EXPORT_SYMBOL(mtree_store);
6399 
6400 /**
6401  * mtree_insert_range() - Insert an entry at a given range if there is no value.
6402  * @mt: The maple tree
6403  * @first: The start of the range
6404  * @last: The end of the range
6405  * @entry: The entry to store
6406  * @gfp: The GFP_FLAGS to use for allocations.
6407  *
6408  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6409  * request, -ENOMEM if memory could not be allocated.
6410  */
6411 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6412                 unsigned long last, void *entry, gfp_t gfp)
6413 {
6414         MA_STATE(ms, mt, first, last);
6415 
6416         if (WARN_ON_ONCE(xa_is_advanced(entry)))
6417                 return -EINVAL;
6418 
6419         if (first > last)
6420                 return -EINVAL;
6421 
6422         mtree_lock(mt);
6423 retry:
6424         mas_insert(&ms, entry);
6425         if (mas_nomem(&ms, gfp))
6426                 goto retry;
6427 
6428         mtree_unlock(mt);
6429         if (mas_is_err(&ms))
6430                 return xa_err(ms.node);
6431 
6432         return 0;
6433 }
6434 EXPORT_SYMBOL(mtree_insert_range);
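
/*
 * Editorial sketch (not part of the original source): unlike
 * mtree_store_range(), mtree_insert_range() refuses to overwrite, so a
 * second insert overlapping [0, 9] fails with -EEXIST.
 */
static void __maybe_unused example_insert_range(void)
{
	struct maple_tree tree;

	mt_init(&tree);
	/* Inserting into an empty range succeeds. */
	WARN_ON(mtree_insert_range(&tree, 0, 9, xa_mk_value(1), GFP_KERNEL));
	/* The range is now occupied, so this returns -EEXIST. */
	WARN_ON(mtree_insert_range(&tree, 5, 7, xa_mk_value(2), GFP_KERNEL) !=
		-EEXIST);
	mtree_destroy(&tree);
}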
6435 
6436 /**
6437  * mtree_insert() - Insert an entry at a given index if there is no value.
6438  * @mt: The maple tree
6439  * @index: The index to store the value
6440  * @entry: The entry to store
6441  * @gfp: The GFP_FLAGS to use for allocations.
6442  *
6443  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6444  * request, -ENOMEM if memory could not be allocated.
6445  */
6446 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6447                  gfp_t gfp)
6448 {
6449         return mtree_insert_range(mt, index, index, entry, gfp);
6450 }
6451 EXPORT_SYMBOL(mtree_insert);
6452 
6453 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6454                 void *entry, unsigned long size, unsigned long min,
6455                 unsigned long max, gfp_t gfp)
6456 {
6457         int ret = 0;
6458 
6459         MA_STATE(mas, mt, 0, 0);
6460         if (!mt_is_alloc(mt))
6461                 return -EINVAL;
6462 
6463         if (WARN_ON_ONCE(mt_is_reserved(entry)))
6464                 return -EINVAL;
6465 
6466         mtree_lock(mt);
6467 retry:
6468         ret = mas_empty_area(&mas, min, max, size);
6469         if (ret)
6470                 goto unlock;
6471 
6472         mas_insert(&mas, entry);
6473         /*
6474          * mas_nomem() may release the lock, causing the allocated area
6475          * to be unavailable, so try to allocate a free area again.
6476          */
6477         if (mas_nomem(&mas, gfp))
6478                 goto retry;
6479 
6480         if (mas_is_err(&mas))
6481                 ret = xa_err(mas.node);
6482         else
6483                 *startp = mas.index;
6484 
6485 unlock:
6486         mtree_unlock(mt);
6487         return ret;
6488 }
6489 EXPORT_SYMBOL(mtree_alloc_range);
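
/*
 * Editorial sketch (not part of the original source): finding a free gap.
 * The tree must be initialized with MT_FLAGS_ALLOC_RANGE; on success @start
 * holds the lowest index of a free span of 16 indices within [0, 1023].
 * @item stands for any non-reserved caller pointer.
 */
static int __maybe_unused example_alloc_range(void *item)
{
	struct maple_tree tree;
	unsigned long start;
	int ret;

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	ret = mtree_alloc_range(&tree, &start, item, 16, 0, 1023, GFP_KERNEL);
	if (!ret)
		pr_debug("allocated [%lu, %lu]\n", start, start + 15);

	mtree_destroy(&tree);
	return ret;
}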
6490 
6491 /**
6492  * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
6493  * @mt: The maple tree.
6494  * @startp: Pointer to ID.
6495  * @range_lo: Lower bound of range to search.
6496  * @range_hi: Upper bound of range to search.
6497  * @entry: The entry to store.
6498  * @next: Pointer to next ID to allocate.
6499  * @gfp: The GFP_FLAGS to use for allocations.
6500  *
6501  * Finds an empty entry in @mt after @next, stores the new index into
6502  * the @startp pointer, stores the entry at that index, then updates @next.
6503  *
6504  * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
6505  *
6506  * Context: Any context.  Takes and releases the mt.lock.  May sleep if
6507  * the @gfp flags permit.
6508  *
6509  * Return: 0 if the allocation succeeded without wrapping, 1 if the
6510  * allocation succeeded after wrapping, -ENOMEM if memory could not be
6511  * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
6512  * free entries.
6513  */
6514 int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
6515                 void *entry, unsigned long range_lo, unsigned long range_hi,
6516                 unsigned long *next, gfp_t gfp)
6517 {
6518         int ret;
6519 
6520         MA_STATE(mas, mt, 0, 0);
6521 
6522         if (!mt_is_alloc(mt))
6523                 return -EINVAL;
6524         if (WARN_ON_ONCE(mt_is_reserved(entry)))
6525                 return -EINVAL;
6526         mtree_lock(mt);
6527         ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
6528                                next, gfp);
6529         mtree_unlock(mt);
6530         return ret;
6531 }
6532 EXPORT_SYMBOL(mtree_alloc_cyclic);
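
/*
 * Editorial sketch (not part of the original source): cyclic ID allocation
 * in the style of idr_alloc_cyclic().  @next persists across calls so IDs
 * keep increasing until they wrap back to the low end of the range.
 */
static int __maybe_unused example_alloc_cyclic(struct maple_tree *mt,
		unsigned long *next, void *item)
{
	unsigned long id;
	int ret;

	/* Allocate an ID in [0, 0xffff], preferring the one after *next. */
	ret = mtree_alloc_cyclic(mt, &id, item, 0, 0xffff, next, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* -EBUSY once the range is exhausted */

	pr_debug("got id %lu%s\n", id, ret == 1 ? " (wrapped)" : "");
	return 0;
}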
6533 
6534 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6535                 void *entry, unsigned long size, unsigned long min,
6536                 unsigned long max, gfp_t gfp)
6537 {
6538         int ret = 0;
6539 
6540         MA_STATE(mas, mt, 0, 0);
6541         if (!mt_is_alloc(mt))
6542                 return -EINVAL;
6543 
6544         if (WARN_ON_ONCE(mt_is_reserved(entry)))
6545                 return -EINVAL;
6546 
6547         mtree_lock(mt);
6548 retry:
6549         ret = mas_empty_area_rev(&mas, min, max, size);
6550         if (ret)
6551                 goto unlock;
6552 
6553         mas_insert(&mas, entry);
6554         /*
6555          * mas_nomem() may release the lock, causing the allocated area
6556          * to be unavailable, so try to allocate a free area again.
6557          */
6558         if (mas_nomem(&mas, gfp))
6559                 goto retry;
6560 
6561         if (mas_is_err(&mas))
6562                 ret = xa_err(mas.node);
6563         else
6564                 *startp = mas.index;
6565 
6566 unlock:
6567         mtree_unlock(mt);
6568         return ret;
6569 }
6570 EXPORT_SYMBOL(mtree_alloc_rrange);
6571 
6572 /**
6573  * mtree_erase() - Find an index and erase the entire range.
6574  * @mt: The maple tree
6575  * @index: The index to erase
6576  *
6577  * Erasing is the same as a walk to an entry then a store of a NULL to that
6578  * ENTIRE range.  In fact, it is implemented as such using the advanced API.
6579  *
6580  * Return: The entry stored at the @index or %NULL
6581  */
6582 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6583 {
6584         void *entry = NULL;
6585 
6586         MA_STATE(mas, mt, index, index);
6587         trace_ma_op(__func__, &mas);
6588 
6589         mtree_lock(mt);
6590         entry = mas_erase(&mas);
6591         mtree_unlock(mt);
6592 
6593         return entry;
6594 }
6595 EXPORT_SYMBOL(mtree_erase);
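
/*
 * Editorial sketch (not part of the original source): erasing through any
 * index inside a stored range removes the whole range and returns the old
 * entry.
 */
static void __maybe_unused example_erase(void)
{
	struct maple_tree tree;

	mt_init(&tree);
	if (mtree_store_range(&tree, 10, 19, xa_mk_value(0xa), GFP_KERNEL))
		return;

	/* Index 12 lies inside [10, 19]; the entire range is cleared. */
	WARN_ON(mtree_erase(&tree, 12) != xa_mk_value(0xa));
	WARN_ON(mtree_load(&tree, 10) != NULL);
	mtree_destroy(&tree);
}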
6596 
6597 /*
6598  * mas_dup_free() - Free an incomplete duplication of a tree.
6599  * @mas: The maple state of an incomplete tree.
6600  *
6601  * @mas->node indicates the node on which the allocation failed. This
6602  * function frees all nodes starting from @mas->node in the
6603  * reverse order of mas_dup_build(). There is no need to hold the source tree
6604  * lock at this time.
6605  */
6606 static void mas_dup_free(struct ma_state *mas)
6607 {
6608         struct maple_node *node;
6609         enum maple_type type;
6610         void __rcu **slots;
6611         unsigned char count, i;
6612 
6613         /* Maybe the first node allocation failed. */
6614         if (mas_is_none(mas))
6615                 return;
6616 
6617         while (!mte_is_root(mas->node)) {
6618                 mas_ascend(mas);
6619                 if (mas->offset) {
6620                         mas->offset--;
6621                         do {
6622                                 mas_descend(mas);
6623                                 mas->offset = mas_data_end(mas);
6624                         } while (!mte_is_leaf(mas->node));
6625 
6626                         mas_ascend(mas);
6627                 }
6628 
6629                 node = mte_to_node(mas->node);
6630                 type = mte_node_type(mas->node);
6631                 slots = ma_slots(node, type);
6632                 count = mas_data_end(mas) + 1;
6633                 for (i = 0; i < count; i++)
6634                         ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
6635                 mt_free_bulk(count, slots);
6636         }
6637 
6638         node = mte_to_node(mas->node);
6639         mt_free_one(node);
6640 }
6641 
6642 /*
6643  * mas_copy_node() - Copy a maple node and replace the parent.
6644  * @mas: The maple state of the source tree.
6645  * @new_mas: The maple state of the new tree.
6646  * @parent: The parent of the new node.
6647  *
6648  * Copy @mas->node to @new_mas->node, set @parent to be the parent of
6649  * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
6650  */
6651 static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
6652                 struct maple_pnode *parent)
6653 {
6654         struct maple_node *node = mte_to_node(mas->node);
6655         struct maple_node *new_node = mte_to_node(new_mas->node);
6656         unsigned long val;
6657 
6658         /* Copy the node completely. */
6659         memcpy(new_node, node, sizeof(struct maple_node));
6660         /* Update the parent node pointer. */
6661         val = (unsigned long)node->parent & MAPLE_NODE_MASK;
6662         new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
6663 }
6664 
6665 /*
6666  * mas_dup_alloc() - Allocate child nodes for a maple node.
6667  * @mas: The maple state of the source tree.
6668  * @new_mas: The maple state of the new tree.
6669  * @gfp: The GFP_FLAGS to use for allocations.
6670  *
6671  * This function allocates child nodes for @new_mas->node during the duplication
6672  * process. If memory allocation fails, @mas is set to -ENOMEM.
6673  */
6674 static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
6675                 gfp_t gfp)
6676 {
6677         struct maple_node *node = mte_to_node(mas->node);
6678         struct maple_node *new_node = mte_to_node(new_mas->node);
6679         enum maple_type type;
6680         unsigned char request, count, i;
6681         void __rcu **slots;
6682         void __rcu **new_slots;
6683         unsigned long val;
6684 
6685         /* Allocate memory for child nodes. */
6686         type = mte_node_type(mas->node);
6687         new_slots = ma_slots(new_node, type);
6688         request = mas_data_end(mas) + 1;
6689         count = mt_alloc_bulk(gfp, request, (void **)new_slots);
6690         if (unlikely(count < request)) {
6691                 memset(new_slots, 0, request * sizeof(void *));
6692                 mas_set_err(mas, -ENOMEM);
6693                 return;
6694         }
6695 
6696         /* Restore node type information in slots. */
6697         slots = ma_slots(node, type);
6698         for (i = 0; i < count; i++) {
6699                 val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
6700                 val &= MAPLE_NODE_MASK;
6701                 ((unsigned long *)new_slots)[i] |= val;
6702         }
6703 }
6704 
6705 /*
6706  * mas_dup_build() - Build a new maple tree from a source tree
6707  * @mas: The maple state of the source tree; it must be in the ma_start state.
6708  * @new_mas: The maple state of the new tree; it must be in the ma_start state.
6709  * @gfp: The GFP_FLAGS to use for allocations.
6710  *
6711  * This function builds a new tree in DFS preorder. If the memory allocation
6712  * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
6713  * last node. mas_dup_free() will free the incomplete duplication of a tree.
6714  *
6715  * Note that the attributes of the two trees need to be exactly the same, and the
6716  * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
6717  */
6718 static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
6719                 gfp_t gfp)
6720 {
6721         struct maple_node *node;
6722         struct maple_pnode *parent = NULL;
6723         struct maple_enode *root;
6724         enum maple_type type;
6725 
6726         if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
6727             unlikely(!mtree_empty(new_mas->tree))) {
6728                 mas_set_err(mas, -EINVAL);
6729                 return;
6730         }
6731 
6732         root = mas_start(mas);
6733         if (mas_is_ptr(mas) || mas_is_none(mas))
6734                 goto set_new_tree;
6735 
6736         node = mt_alloc_one(gfp);
6737         if (!node) {
6738                 new_mas->status = ma_none;
6739                 mas_set_err(mas, -ENOMEM);
6740                 return;
6741         }
6742 
6743         type = mte_node_type(mas->node);
6744         root = mt_mk_node(node, type);
6745         new_mas->node = root;
6746         new_mas->min = 0;
6747         new_mas->max = ULONG_MAX;
6748         root = mte_mk_root(root);
6749         while (1) {
6750                 mas_copy_node(mas, new_mas, parent);
6751                 if (!mte_is_leaf(mas->node)) {
6752                         /* Only allocate child nodes for non-leaf nodes. */
6753                         mas_dup_alloc(mas, new_mas, gfp);
6754                         if (unlikely(mas_is_err(mas)))
6755                                 return;
6756                 } else {
6757                         /*
6758                          * This is the last leaf node and duplication is
6759                          * completed.
6760                          */
6761                         if (mas->max == ULONG_MAX)
6762                                 goto done;
6763 
6764                         /* This is not the last leaf node and needs to go up. */
6765                         do {
6766                                 mas_ascend(mas);
6767                                 mas_ascend(new_mas);
6768                         } while (mas->offset == mas_data_end(mas));
6769 
6770                         /* Move to the next subtree. */
6771                         mas->offset++;
6772                         new_mas->offset++;
6773                 }
6774 
6775                 mas_descend(mas);
6776                 parent = ma_parent_ptr(mte_to_node(new_mas->node));
6777                 mas_descend(new_mas);
6778                 mas->offset = 0;
6779                 new_mas->offset = 0;
6780         }
6781 done:
6782         /* Specially handle the parent of the root node. */
6783         mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
6784 set_new_tree:
6785         /* Copy the flags so the new tree reports the same height */
6786         new_mas->tree->ma_flags = mas->tree->ma_flags;
6787         rcu_assign_pointer(new_mas->tree->ma_root, root);
6788 }
6789 
6790 /**
6791  * __mt_dup(): Duplicate an entire maple tree
6792  * @mt: The source maple tree
6793  * @new: The new maple tree
6794  * @gfp: The GFP_FLAGS to use for allocations
6795  *
6796  * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6797  * traversal. It uses memcpy() to copy nodes in the source tree and allocates
6798  * new child nodes for non-leaf nodes. The new node is exactly the same as the
6799  * source node except for all the addresses stored in it. It will be faster than
6800  * traversing all elements in the source tree and inserting them one by one into
6801  * the new tree.
6802  * The user needs to ensure that the attributes of the source tree and the new
6803  * tree are the same, and the new tree needs to be an empty tree, otherwise
6804  * -EINVAL will be returned.
6805  * Note that the user needs to manually lock the source tree and the new tree.
6806  *
6807  * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6808  * the attributes of the two trees are different or the new tree is not an empty
6809  * tree.
6810  */
6811 int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6812 {
6813         int ret = 0;
6814         MA_STATE(mas, mt, 0, 0);
6815         MA_STATE(new_mas, new, 0, 0);
6816 
6817         mas_dup_build(&mas, &new_mas, gfp);
6818         if (unlikely(mas_is_err(&mas))) {
6819                 ret = xa_err(mas.node);
6820                 if (ret == -ENOMEM)
6821                         mas_dup_free(&new_mas);
6822         }
6823 
6824         return ret;
6825 }
6826 EXPORT_SYMBOL(__mt_dup);
6827 
6828 /**
6829  * mtree_dup(): Duplicate an entire maple tree
6830  * @mt: The source maple tree
6831  * @new: The new maple tree
6832  * @gfp: The GFP_FLAGS to use for allocations
6833  *
6834  * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6835  * traversal. It uses memcpy() to copy nodes in the source tree and allocates
6836  * new child nodes for non-leaf nodes. The new node is exactly the same as the
6837  * source node except for all the addresses stored in it. It will be faster than
6838  * traversing all elements in the source tree and inserting them one by one into
6839  * the new tree.
6840  * The user needs to ensure that the attributes of the source tree and the new
6841  * tree are the same, and the new tree needs to be an empty tree, otherwise
6842  * -EINVAL will be returned.
6843  *
6844  * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6845  * the attributes of the two trees are different or the new tree is not an empty
6846  * tree.
6847  */
6848 int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6849 {
6850         int ret = 0;
6851         MA_STATE(mas, mt, 0, 0);
6852         MA_STATE(new_mas, new, 0, 0);
6853 
6854         mas_lock(&new_mas);
6855         mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
6856         mas_dup_build(&mas, &new_mas, gfp);
6857         mas_unlock(&mas);
6858         if (unlikely(mas_is_err(&mas))) {
6859                 ret = xa_err(mas.node);
6860                 if (ret == -ENOMEM)
6861                         mas_dup_free(&new_mas);
6862         }
6863 
6864         mas_unlock(&new_mas);
6865         return ret;
6866 }
6867 EXPORT_SYMBOL(mtree_dup);
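
/*
 * Editorial sketch (not part of the original source): duplicating a
 * populated tree into a freshly initialized, empty one.  Assumes @src was
 * created with default flags so that both trees share the same attributes.
 */
static int __maybe_unused example_dup(struct maple_tree *src)
{
	struct maple_tree copy;
	int ret;

	mt_init(&copy);		/* empty, default flags to match @src */
	ret = mtree_dup(src, &copy, GFP_KERNEL);
	if (ret)
		return ret;	/* -ENOMEM or -EINVAL */

	/* The copy is now fully independent of @src. */
	mtree_destroy(&copy);
	return 0;
}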
6868 
6869 /**
6870  * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6871  * @mt: The maple tree
6872  *
6873  * Note: Does not handle locking.
6874  */
6875 void __mt_destroy(struct maple_tree *mt)
6876 {
6877         void *root = mt_root_locked(mt);
6878 
6879         rcu_assign_pointer(mt->ma_root, NULL);
6880         if (xa_is_node(root))
6881                 mte_destroy_walk(root, mt);
6882 
6883         mt->ma_flags = mt_attr(mt);
6884 }
6885 EXPORT_SYMBOL_GPL(__mt_destroy);
6886 
6887 /**
6888  * mtree_destroy() - Destroy a maple tree
6889  * @mt: The maple tree
6890  *
6891  * Frees all resources used by the tree.  Handles locking.
6892  */
6893 void mtree_destroy(struct maple_tree *mt)
6894 {
6895         mtree_lock(mt);
6896         __mt_destroy(mt);
6897         mtree_unlock(mt);
6898 }
6899 EXPORT_SYMBOL(mtree_destroy);
6900 
6901 /**
6902  * mt_find() - Search upwards from *@index until an entry is found.
6903  * @mt: The maple tree
6904  * @index: Pointer which contains the start location of the search
6905  * @max: The maximum value of the search range
6906  *
6907  * Takes RCU read lock internally to protect the search, which does not
6908  * protect the returned pointer after dropping RCU read lock.
6909  * See also: Documentation/core-api/maple_tree.rst
6910  *
6911  * If an entry is found, @index is updated to point to the next possible
6912  * entry, independent of whether the found entry occupies a single index
6913  * or a range of indices.
6914  *
6915  * Return: The entry at or after the @index or %NULL
6916  */
6917 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6918 {
6919         MA_STATE(mas, mt, *index, *index);
6920         void *entry;
6921 #ifdef CONFIG_DEBUG_MAPLE_TREE
6922         unsigned long copy = *index;
6923 #endif
6924 
6925         trace_ma_read(__func__, &mas);
6926 
6927         if ((*index) > max)
6928                 return NULL;
6929 
6930         rcu_read_lock();
6931 retry:
6932         entry = mas_state_walk(&mas);
6933         if (mas_is_start(&mas))
6934                 goto retry;
6935 
6936         if (unlikely(xa_is_zero(entry)))
6937                 entry = NULL;
6938 
6939         if (entry)
6940                 goto unlock;
6941 
6942         while (mas_is_active(&mas) && (mas.last < max)) {
6943                 entry = mas_next_entry(&mas, max);
6944                 if (likely(entry && !xa_is_zero(entry)))
6945                         break;
6946         }
6947 
6948         if (unlikely(xa_is_zero(entry)))
6949                 entry = NULL;
6950 unlock:
6951         rcu_read_unlock();
6952         if (likely(entry)) {
6953                 *index = mas.last + 1;
6954 #ifdef CONFIG_DEBUG_MAPLE_TREE
6955                 if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6956                         pr_err("index not increased! %lx <= %lx\n",
6957                                *index, copy);
6958 #endif
6959         }
6960 
6961         return entry;
6962 }
6963 EXPORT_SYMBOL(mt_find);
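
/*
 * Editorial sketch (not part of the original source): the
 * mt_find()/mt_find_after() pair is what the mt_for_each() iterator in
 * <linux/maple_tree.h> is built from.  An equivalent open-coded loop:
 */
static void __maybe_unused example_mt_find_loop(struct maple_tree *mt)
{
	unsigned long index = 0;
	void *entry;

	for (entry = mt_find(mt, &index, ULONG_MAX); entry;
	     entry = mt_find_after(mt, &index, ULONG_MAX)) {
		/* mt_find() advanced @index past the entry's last index. */
		pr_debug("entry %p, next search starts at %lx\n", entry,
			 index);
	}
}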
6964 
6965 /**
6966  * mt_find_after() - Search upwards from *@index until an entry is found.
6967  * @mt: The maple tree
6968  * @index: Pointer which contains the start location of the search
6969  * @max: The maximum value to check
6970  *
6971  * Same as mt_find() except that it checks @index for 0 before
6972  * searching. If @index == 0, the search is aborted. This covers a wrap
6973  * around of @index to 0 in an iterator loop.
6974  *
6975  * Return: The entry at or after the @index or %NULL
6976  */
6977 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6978                     unsigned long max)
6979 {
6980         if (!(*index))
6981                 return NULL;
6982 
6983         return mt_find(mt, index, max);
6984 }
6985 EXPORT_SYMBOL(mt_find_after);
6986 
6987 #ifdef CONFIG_DEBUG_MAPLE_TREE
6988 atomic_t maple_tree_tests_run;
6989 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6990 atomic_t maple_tree_tests_passed;
6991 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6992 
6993 #ifndef __KERNEL__
6994 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6995 void mt_set_non_kernel(unsigned int val)
6996 {
6997         kmem_cache_set_non_kernel(maple_node_cache, val);
6998 }
6999 
7000 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
7001 unsigned long mt_get_alloc_size(void)
7002 {
7003         return kmem_cache_get_alloc(maple_node_cache);
7004 }
7005 
7006 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
7007 void mt_zero_nr_tallocated(void)
7008 {
7009         kmem_cache_zero_nr_tallocated(maple_node_cache);
7010 }
7011 
7012 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
7013 unsigned int mt_nr_tallocated(void)
7014 {
7015         return kmem_cache_nr_tallocated(maple_node_cache);
7016 }
7017 
7018 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
7019 unsigned int mt_nr_allocated(void)
7020 {
7021         return kmem_cache_nr_allocated(maple_node_cache);
7022 }
7023 
7024 void mt_cache_shrink(void)
7025 {
7026 }
7027 #else
7028 /*
7029  * mt_cache_shrink() - For testing, don't use this.
7030  *
7031  * Certain testcases can trigger an OOM when combined with other memory
7032  * debugging configuration options.  This function is used to reduce the
7033  * possibility of an out-of-memory event due to kmem_cache objects remaining
7034  * around for longer than usual.
7035  */
7036 void mt_cache_shrink(void)
7037 {
7038         kmem_cache_shrink(maple_node_cache);
7039 
7040 }
7041 EXPORT_SYMBOL_GPL(mt_cache_shrink);
7042 
7043 #endif /* not defined __KERNEL__ */
7044 /*
7045  * mas_get_slot() - Get the entry in the maple state node stored at @offset.
7046  * @mas: The maple state
7047  * @offset: The offset into the slot array to fetch.
7048  *
7049  * Return: The entry stored at @offset.
7050  */
7051 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
7052                 unsigned char offset)
7053 {
7054         return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
7055                         offset);
7056 }
7057 
7058 /* Depth first search, post-order */
7059 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
7060 {
7061 
7062         struct maple_enode *p, *mn = mas->node;
7063         unsigned long p_min, p_max;
7064 
7065         mas_next_node(mas, mas_mn(mas), max);
7066         if (!mas_is_overflow(mas))
7067                 return;
7068 
7069         if (mte_is_root(mn))
7070                 return;
7071 
7072         mas->node = mn;
7073         mas_ascend(mas);
7074         do {
7075                 p = mas->node;
7076                 p_min = mas->min;
7077                 p_max = mas->max;
7078                 mas_prev_node(mas, 0);
7079         } while (!mas_is_underflow(mas));
7080 
7081         mas->node = p;
7082         mas->max = p_max;
7083         mas->min = p_min;
7084 }
7085 
7086 /* Tree validations */
7087 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7088                 unsigned long min, unsigned long max, unsigned int depth,
7089                 enum mt_dump_format format);
7090 static void mt_dump_range(unsigned long min, unsigned long max,
7091                           unsigned int depth, enum mt_dump_format format)
7092 {
7093         static const char spaces[] = "                                ";
7094 
7095         switch (format) {
7096         case mt_dump_hex:
7097                 if (min == max)
7098                         pr_info("%.*s%lx: ", depth * 2, spaces, min);
7099                 else
7100                         pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
7101                 break;
7102         case mt_dump_dec:
7103                 if (min == max)
7104                         pr_info("%.*s%lu: ", depth * 2, spaces, min);
7105                 else
7106                         pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
7107         }
7108 }
7109 
7110 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
7111                           unsigned int depth, enum mt_dump_format format)
7112 {
7113         mt_dump_range(min, max, depth, format);
7114 
7115         if (xa_is_value(entry))
7116                 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
7117                                 xa_to_value(entry), entry);
7118         else if (xa_is_zero(entry))
7119                 pr_cont("zero (%ld)\n", xa_to_internal(entry));
7120         else if (mt_is_reserved(entry))
7121                 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
7122         else
7123                 pr_cont("%p\n", entry);
7124 }
7125 
7126 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
7127                 unsigned long min, unsigned long max, unsigned int depth,
7128                 enum mt_dump_format format)
7129 {
7130         struct maple_range_64 *node = &mte_to_node(entry)->mr64;
7131         bool leaf = mte_is_leaf(entry);
7132         unsigned long first = min;
7133         int i;
7134 
7135         pr_cont(" contents: ");
7136         for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
7137                 switch (format) {
7138                 case mt_dump_hex:
7139                         pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7140                         break;
7141                 case mt_dump_dec:
7142                         pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7143                 }
7144         }
7145         pr_cont("%p\n", node->slot[i]);
7146         for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
7147                 unsigned long last = max;
7148 
7149                 if (i < (MAPLE_RANGE64_SLOTS - 1))
7150                         last = node->pivot[i];
7151                 else if (!node->slot[i] && max != mt_node_max(entry))
7152                         break;
7153                 if (last == 0 && i > 0)
7154                         break;
7155                 if (leaf)
7156                         mt_dump_entry(mt_slot(mt, node->slot, i),
7157                                         first, last, depth + 1, format);
7158                 else if (node->slot[i])
7159                         mt_dump_node(mt, mt_slot(mt, node->slot, i),
7160                                         first, last, depth + 1, format);
7161 
7162                 if (last == max)
7163                         break;
7164                 if (last > max) {
7165                         switch (format) {
7166                         case mt_dump_hex:
7167                                 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
7168                                         node, last, max, i);
7169                                 break;
7170                         case mt_dump_dec:
7171                                 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7172                                         node, last, max, i);
7173                         }
7174                 }
7175                 first = last + 1;
7176         }
7177 }
7178 
7179 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
7180         unsigned long min, unsigned long max, unsigned int depth,
7181         enum mt_dump_format format)
7182 {
7183         struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
7184         bool leaf = mte_is_leaf(entry);
7185         unsigned long first = min;
7186         int i;
7187 
7188         pr_cont(" contents: ");
7189         for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7190                 switch (format) {
7191                 case mt_dump_hex:
7192                         pr_cont("%lx ", node->gap[i]);
7193                         break;
7194                 case mt_dump_dec:
7195                         pr_cont("%lu ", node->gap[i]);
7196                 }
7197         }
7198         pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
7199         for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
7200                 switch (format) {
7201                 case mt_dump_hex:
7202                         pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7203                         break;
7204                 case mt_dump_dec:
7205                         pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7206                 }
7207         }
7208         pr_cont("%p\n", node->slot[i]);
7209         for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7210                 unsigned long last = max;
7211 
7212                 if (i < (MAPLE_ARANGE64_SLOTS - 1))
7213                         last = node->pivot[i];
7214                 else if (!node->slot[i])
7215                         break;
7216                 if (last == 0 && i > 0)
7217                         break;
7218                 if (leaf)
7219                         mt_dump_entry(mt_slot(mt, node->slot, i),
7220                                         first, last, depth + 1, format);
7221                 else if (node->slot[i])
7222                         mt_dump_node(mt, mt_slot(mt, node->slot, i),
7223                                         first, last, depth + 1, format);
7224 
7225                 if (last == max)
7226                         break;
7227                 if (last > max) {
7228                         pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7229                                         node, last, max, i);
7230                         break;
7231                 }
7232                 first = last + 1;
7233         }
7234 }
7235 
7236 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7237                 unsigned long min, unsigned long max, unsigned int depth,
7238                 enum mt_dump_format format)
7239 {
7240         struct maple_node *node = mte_to_node(entry);
7241         unsigned int type = mte_node_type(entry);
7242         unsigned int i;
7243 
7244         mt_dump_range(min, max, depth, format);
7245 
7246         pr_cont("node %p depth %d type %d parent %p", node, depth, type,
7247                         node ? node->parent : NULL);
7248         switch (type) {
7249         case maple_dense:
7250                 pr_cont("\n");
7251                 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
7252                         if (min + i > max)
7253                                 pr_cont("OUT OF RANGE: ");
7254                         mt_dump_entry(mt_slot(mt, node->slot, i),
7255                                         min + i, min + i, depth, format);
7256                 }
7257                 break;
7258         case maple_leaf_64:
7259         case maple_range_64:
7260                 mt_dump_range64(mt, entry, min, max, depth, format);
7261                 break;
7262         case maple_arange_64:
7263                 mt_dump_arange64(mt, entry, min, max, depth, format);
7264                 break;
7265 
7266         default:
7267                 pr_cont(" UNKNOWN TYPE\n");
7268         }
7269 }
7270 
7271 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
7272 {
7273         void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
7274 
7275         pr_info("maple_tree(%p) flags %X, height %u root %p\n",
7276                  mt, mt->ma_flags, mt_height(mt), entry);
7277         if (!xa_is_node(entry))
7278                 mt_dump_entry(entry, 0, 0, 0, format);
7279         else if (entry)
7280                 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
7281 }
7282 EXPORT_SYMBOL_GPL(mt_dump);
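
/*
 * Editorial sketch (not part of the original source): dumping a tree while
 * debugging; only available under CONFIG_DEBUG_MAPLE_TREE.
 */
static void __maybe_unused example_dump(const struct maple_tree *mt)
{
	mt_dump(mt, mt_dump_hex);	/* pivots and pointers in hex */
	mt_dump(mt, mt_dump_dec);	/* the same tree in decimal */
}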
7283 
7284 /*
7285  * Calculate the maximum gap in a node and check if that's what is reported in
7286  * the parent (unless root).
7287  */
7288 static void mas_validate_gaps(struct ma_state *mas)
7289 {
7290         struct maple_enode *mte = mas->node;
7291         struct maple_node *p_mn, *node = mte_to_node(mte);
7292         enum maple_type mt = mte_node_type(mas->node);
7293         unsigned long gap = 0, max_gap = 0;
7294         unsigned long p_end, p_start = mas->min;
7295         unsigned char p_slot, offset;
7296         unsigned long *gaps = NULL;
7297         unsigned long *pivots = ma_pivots(node, mt);
7298         unsigned int i;
7299 
7300         if (ma_is_dense(mt)) {
7301                 for (i = 0; i < mt_slot_count(mte); i++) {
7302                         if (mas_get_slot(mas, i)) {
7303                                 if (gap > max_gap)
7304                                         max_gap = gap;
7305                                 gap = 0;
7306                                 continue;
7307                         }
7308                         gap++;
7309                 }
7310                 goto counted;
7311         }
7312 
7313         gaps = ma_gaps(node, mt);
7314         for (i = 0; i < mt_slot_count(mte); i++) {
7315                 p_end = mas_safe_pivot(mas, pivots, i, mt);
7316 
7317                 if (!gaps) {
7318                         if (!mas_get_slot(mas, i))
7319                                 gap = p_end - p_start + 1;
7320                 } else {
7321                         void *entry = mas_get_slot(mas, i);
7322 
7323                         gap = gaps[i];
7324                         MT_BUG_ON(mas->tree, !entry);
7325 
7326                         if (gap > p_end - p_start + 1) {
7327                                 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7328                                        mas_mn(mas), i, gap, p_end, p_start,
7329                                        p_end - p_start + 1);
7330                                 MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
7331                         }
7332                 }
7333 
7334                 if (gap > max_gap)
7335                         max_gap = gap;
7336 
7337                 p_start = p_end + 1;
7338                 if (p_end >= mas->max)
7339                         break;
7340         }
7341 
7342 counted:
7343         if (mt == maple_arange_64) {
7344                 MT_BUG_ON(mas->tree, !gaps);
7345                 offset = ma_meta_gap(node);
7346                 if (offset > i) {
7347                         pr_err("gap offset %p[%u] is invalid\n", node, offset);
7348                         MT_BUG_ON(mas->tree, 1);
7349                 }
7350 
7351                 if (gaps[offset] != max_gap) {
7352                         pr_err("gap %p[%u] is not the largest gap %lu\n",
7353                                node, offset, max_gap);
7354                         MT_BUG_ON(mas->tree, 1);
7355                 }
7356 
7357                 for (i++ ; i < mt_slot_count(mte); i++) {
7358                         if (gaps[i] != 0) {
7359                                 pr_err("gap %p[%u] beyond node limit != 0\n",
7360                                        node, i);
7361                                 MT_BUG_ON(mas->tree, 1);
7362                         }
7363                 }
7364         }
7365 
7366         if (mte_is_root(mte))
7367                 return;
7368 
7369         p_slot = mte_parent_slot(mas->node);
7370         p_mn = mte_parent(mte);
7371         MT_BUG_ON(mas->tree, max_gap > mas->max);
7372         if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7373                 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7374                 mt_dump(mas->tree, mt_dump_hex);
7375                 MT_BUG_ON(mas->tree, 1);
7376         }
7377 }
7378 
7379 static void mas_validate_parent_slot(struct ma_state *mas)
7380 {
7381         struct maple_node *parent;
7382         struct maple_enode *node;
7383         enum maple_type p_type;
7384         unsigned char p_slot;
7385         void __rcu **slots;
7386         int i;
7387 
7388         if (mte_is_root(mas->node))
7389                 return;
7390 
7391         p_slot = mte_parent_slot(mas->node);
7392         p_type = mas_parent_type(mas, mas->node);
7393         parent = mte_parent(mas->node);
7394         slots = ma_slots(parent, p_type);
7395         MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7396 
7397         /* Check prev/next parent slot for duplicate node entry */
7398 
7399         for (i = 0; i < mt_slots[p_type]; i++) {
7400                 node = mas_slot(mas, slots, i);
7401                 if (i == p_slot) {
7402                         if (node != mas->node)
7403                                 pr_err("parent %p[%u] does not have %p\n",
7404                                         parent, i, mas_mn(mas));
7405                         MT_BUG_ON(mas->tree, node != mas->node);
7406                 } else if (node == mas->node) {
7407                         pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7408                                mas_mn(mas), parent, i, p_slot);
7409                         MT_BUG_ON(mas->tree, node == mas->node);
7410                 }
7411         }
7412 }
7413 
7414 static void mas_validate_child_slot(struct ma_state *mas)
7415 {
7416         enum maple_type type = mte_node_type(mas->node);
7417         void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7418         unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7419         struct maple_enode *child;
7420         unsigned char i;
7421 
7422         if (mte_is_leaf(mas->node))
7423                 return;
7424 
7425         for (i = 0; i < mt_slots[type]; i++) {
7426                 child = mas_slot(mas, slots, i);
7427 
7428                 if (!child) {
7429                         pr_err("Non-leaf node lacks child at %p[%u]\n",
7430                                mas_mn(mas), i);
7431                         MT_BUG_ON(mas->tree, 1);
7432                 }
7433 
7434                 if (mte_parent_slot(child) != i) {
7435                         pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7436                                mas_mn(mas), i, mte_to_node(child),
7437                                mte_parent_slot(child));
7438                         MT_BUG_ON(mas->tree, 1);
7439                 }
7440 
7441                 if (mte_parent(child) != mte_to_node(mas->node)) {
7442                         pr_err("child %p has parent %p not %p\n",
7443                                mte_to_node(child), mte_parent(child),
7444                                mte_to_node(mas->node));
7445                         MT_BUG_ON(mas->tree, 1);
7446                 }
7447 
7448                 if (i < mt_pivots[type] && pivots[i] == mas->max)
7449                         break;
7450         }
7451 }
7452 
7453 /*
7454  * Validate all pivots are within mas->min and mas->max, check metadata ends
7455  * where the maximum ends and ensure there are no slots or pivots set outside of
7456  * the end of the data.
7457  */
7458 static void mas_validate_limits(struct ma_state *mas)
7459 {
7460         int i;
7461         unsigned long prev_piv = 0;
7462         enum maple_type type = mte_node_type(mas->node);
7463         void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7464         unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7465 
7466         for (i = 0; i < mt_slots[type]; i++) {
7467                 unsigned long piv;
7468 
7469                 piv = mas_safe_pivot(mas, pivots, i, type);
7470 
7471                 if (!piv && (i != 0)) {
7472                         pr_err("Missing node limit pivot at %p[%u]",
7473                                mas_mn(mas), i);
7474                         MAS_WARN_ON(mas, 1);
7475                 }
7476 
7477                 if (prev_piv > piv) {
7478                         pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7479                                 mas_mn(mas), i, piv, prev_piv);
7480                         MAS_WARN_ON(mas, piv < prev_piv);
7481                 }
7482 
7483                 if (piv < mas->min) {
7484                         pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7485                                 piv, mas->min);
7486                         MAS_WARN_ON(mas, piv < mas->min);
7487                 }
7488                 if (piv > mas->max) {
7489                         pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7490                                 piv, mas->max);
7491                         MAS_WARN_ON(mas, piv > mas->max);
7492                 }
7493                 prev_piv = piv;
7494                 if (piv == mas->max)
7495                         break;
7496         }
7497 
7498         if (mas_data_end(mas) != i) {
7499                 pr_err("node%p: data_end %u != the last slot offset %u\n",
7500                        mas_mn(mas), mas_data_end(mas), i);
7501                 MT_BUG_ON(mas->tree, 1);
7502         }
7503 
7504         for (i += 1; i < mt_slots[type]; i++) {
7505                 void *entry = mas_slot(mas, slots, i);
7506 
7507                 if (entry && (i != mt_slots[type] - 1)) {
7508                         pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7509                                i, entry);
7510                         MT_BUG_ON(mas->tree, entry != NULL);
7511                 }
7512 
7513                 if (i < mt_pivots[type]) {
7514                         unsigned long piv = pivots[i];
7515 
7516                         if (!piv)
7517                                 continue;
7518 
7519                         pr_err("%p[%u] should not have piv %lu\n",
7520                                mas_mn(mas), i, piv);
7521                         MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
7522                 }
7523         }
7524 }
7525 
7526 static void mt_validate_nulls(struct maple_tree *mt)
7527 {
7528         void *entry, *last = (void *)1;
7529         unsigned char offset = 0;
7530         void __rcu **slots;
7531         MA_STATE(mas, mt, 0, 0);
7532 
7533         mas_start(&mas);
7534         if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
7535                 return;
7536 
7537         while (!mte_is_leaf(mas.node))
7538                 mas_descend(&mas);
7539 
7540         slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7541         do {
7542                 entry = mas_slot(&mas, slots, offset);
7543                 if (!last && !entry) {
7544                         pr_err("Sequential nulls end at %p[%u]\n",
7545                                 mas_mn(&mas), offset);
7546                 }
7547                 MT_BUG_ON(mt, !last && !entry);
7548                 last = entry;
7549                 if (offset == mas_data_end(&mas)) {
7550                         mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7551                         if (mas_is_overflow(&mas))
7552                                 return;
7553                         offset = 0;
7554                         slots = ma_slots(mte_to_node(mas.node),
7555                                          mte_node_type(mas.node));
7556                 } else {
7557                         offset++;
7558                 }
7559 
7560         } while (!mas_is_overflow(&mas));
7561 }
7562 
7563 /*
7564  * Validate a maple tree by checking:
7565  * 1. The limits (pivots are within mas->min to mas->max)
7566  * 2. The gap is correctly set in the parents
7567  */
7568 void mt_validate(struct maple_tree *mt)
7569         __must_hold(mt->ma_lock)
7570 {
7571         unsigned char end;
7572 
7573         MA_STATE(mas, mt, 0, 0);
7574         mas_start(&mas);
7575         if (!mas_is_active(&mas))
7576                 return;
7577 
7578         while (!mte_is_leaf(mas.node))
7579                 mas_descend(&mas);
7580 
7581         while (!mas_is_overflow(&mas)) {
7582                 MAS_WARN_ON(&mas, mte_dead_node(mas.node));
7583                 end = mas_data_end(&mas);
7584                 if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
7585                                 (mas.max != ULONG_MAX))) {
7586                         pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
7587                 }
7588 
7589                 mas_validate_parent_slot(&mas);
7590                 mas_validate_limits(&mas);
7591                 mas_validate_child_slot(&mas);
7592                 if (mt_is_alloc(mt))
7593                         mas_validate_gaps(&mas);
7594                 mas_dfs_postorder(&mas, ULONG_MAX);
7595         }
7596         mt_validate_nulls(mt);
7597 }
7598 EXPORT_SYMBOL_GPL(mt_validate);
7599 
7600 void mas_dump(const struct ma_state *mas)
7601 {
7602         pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
7603         switch (mas->status) {
7604         case ma_active:
7605                 pr_err("(ma_active)");
7606                 break;
7607         case ma_none:
7608                 pr_err("(ma_none)");
7609                 break;
7610         case ma_root:
7611                 pr_err("(ma_root)");
7612                 break;
7613         case ma_start:
7614                 pr_err("(ma_start) ");
7615                 break;
7616         case ma_pause:
7617                 pr_err("(ma_pause) ");
7618                 break;
7619         case ma_overflow:
7620                 pr_err("(ma_overflow) ");
7621                 break;
7622         case ma_underflow:
7623                 pr_err("(ma_underflow) ");
7624                 break;
7625         case ma_error:
7626                 pr_err("(ma_error) ");
7627                 break;
7628         }
7629 
7630         pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
7631                mas->index, mas->last);
7632         pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
7633                mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
7634         if (mas->index > mas->last)
7635                 pr_err("Check index & last\n");
7636 }
7637 EXPORT_SYMBOL_GPL(mas_dump);
7638 
7639 void mas_wr_dump(const struct ma_wr_state *wr_mas)
7640 {
7641         pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
7642                wr_mas->node, wr_mas->r_min, wr_mas->r_max);
7643         pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
7644                wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
7645                wr_mas->end_piv);
7646 }
7647 EXPORT_SYMBOL_GPL(mas_wr_dump);
7648 
7649 #endif /* CONFIG_DEBUG_MAPLE_TREE */
7650 
