// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─ Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 *
 */
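
/*
 * Example (illustrative sketch, not upstream documentation): a leaf
 * maple_range_64 with minimum 0 and maximum 100 holding the ranges
 * 0-5 => ptrA and 6-15 => ptrB could lay out as:
 *
 *	slot[0] = ptrA,  pivot[0] = 5
 *	slot[1] = ptrB,  pivot[1] = 15
 *	slot[2] = NULL,  pivot[2] = 100
 *
 * A lookup of index 10 scans the pivots until one is >= 10 (pivot[1]) and
 * returns slot[1] == ptrB; indices 16-100 fall into the NULL at slot 2.
 * (ptrA/ptrB are arbitrary entries.)
 */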

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}
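
/*
 * For example (sketch): encoding a node at the (hypothetical) address
 * 0x...f000 as a maple_range_64 produces
 *
 *	enode = 0x...f000 | (maple_range_64 << MAPLE_ENODE_TYPE_SHIFT)
 *			  | MAPLE_ENODE_NULL;
 *
 * mte_to_node() masks off MAPLE_NODE_MASK to recover the bare pointer and
 * mte_node_type() extracts the type bits, so the type travels in the
 * otherwise-unused low bits of the 256B aligned pointer.
 */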

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02
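
/*
 * For example (sketch): a child stored in slot 5 of a maple_range_64
 * parent at the (hypothetical) address 0x...e900 carries
 *
 *	parent = 0x...e900 | (5 << MAPLE_PARENT_SLOT_SHIFT)
 *			   | MAPLE_PARENT_RANGE64;	== 0x...e92e
 *
 * mte_parent() masks off MAPLE_NODE_MASK to recover the parent node and
 * mte_parent_slot() shifts the low bits back down to recover slot 5.
 */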

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
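
/*
 * For example (sketch): requesting three nodes on an ma_state with no
 * allocations encodes the count directly in the pointer:
 *
 *	mas->alloc = (struct maple_alloc *)((3 << 1) | 1);	== 0x7
 *
 * Bit 0 being set marks the value as a count, so mas_allocated() reports 0
 * while mas_alloc_req() decodes 0x7 >> 1 == 3.  Once a node is allocated,
 * both counts move into the maple_alloc node itself.
 */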

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node - the maple node
 * @type - the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node - the maple node
 * @type - the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mas: The maple state.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of the node in @mas.
 */
static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
	struct maple_node *node = mas_mn(mas);
	enum maple_type type = mte_node_type(mas->node);

	if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
		mas_set_err(mas, -EIO);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
			    void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat - the ma_topiary, a linked list of dead nodes.
 * @dead_enode - the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas - the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	if (!mas->min)
		set_min = true;

	if (mas->max == ULONG_MAX)
		set_max = true;

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}
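
/*
 * For example (sketch): ascending from the leftmost leaf of a three-level
 * tree, the leaf and its parent both sit in slot 0, so no ancestor pivot
 * below the root bounds the minimum.  The loop above therefore keeps
 * climbing until both a min and a max pivot are found (or the root is
 * reached), which is why min and max may be resolved at different levels.
 */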

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
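
/*
 * Typical usage (sketch, with a hypothetical retry label): request nodes
 * before a write and fall back to a sleeping allocation on failure:
 *
 *	mas_node_count(&mas, 3);
 *	if (mas_is_err(&mas))
 *		goto retry_gfp_kernel;	(e.g. drop the lock, then
 *					 mas_node_count_gfp(&mas, 3, GFP_KERNEL))
 *
 * On success the nodes sit in mas.alloc and are consumed by mas_pop_node()
 * during the write; on failure the state holds -ENOMEM.
 */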

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* The single entry is only stored at index 0 */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}
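
/*
 * Example usage (sketch): beginning a manual walk.
 *
 *	MA_STATE(mas, &tree, index, index);
 *	entry = mas_start(&mas);
 *	if (mas_is_none(&mas))
 *		return NULL;		(empty tree)
 *	if (mas_is_ptr(&mas))
 *		return entry;		(single entry; non-NULL only when
 *					 index == 0)
 *
 * Otherwise mas.node is the safe root and the walk may descend.  MA_STATE
 * and mas_is_none() come from include/linux/maple_tree.h.
 */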

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas - the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below; slot 1
	 * may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
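
/*
 * Worked example (sketch): a leaf with min = 0, max = 100 holding
 *
 *	slot[0] = NULL,  pivot[0] = 9
 *	slot[1] = ptrA,  pivot[1] = 49
 *	slot[2] = NULL,  pivot[2] = 79
 *	slot[3] = ptrB,  pivot[3] = 100
 *
 * The slot 0 special case yields 9 - 0 + 1 = 10, then the loop finds
 * 79 - 49 = 30 at slot 2, so mas_leaf_max_gap() returns 30.  (ptrA/ptrB
 * are arbitrary entries.)
 */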

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas - the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas - the maple state (for the tree)
 * @parent - the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas - the ma_state to use for operations.
 * @advanced - boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			      ((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out the
 * old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum range value of the data in @bn
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a node cannot
	 * end on a NULL entry, with the exception of the left-most leaf.  The
	 * limitation means that the split of a node must be checked for this condition
	 * and be able to put more data in one direction or the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as the 3-way
	 * split scenario.  The 3-way split comes about by means of a store of a range
	 * that overwrites the end and beginning of two full nodes.  The result is a set
	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
	 * also be located in different parent nodes which are also full.  This can
	 * carry upwards all the way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
		 */
		while ((split < slot_count - 1) &&
		       ((bn->pivot[split] - min) < slot_count - 1) &&
		       (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}
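
/*
 * Worked example (sketch): with MAPLE_RANGE64_SLOTS == 16 (as in the layout
 * shown at the top of this file), a store overwriting the end of one full
 * leaf and the start of the next can leave b_end == 31 entries in the big
 * node; 31 < 2 * 16, so two nodes can suffice and the initial split is 15
 * (then nudged by the checks above).  Had b_end been 32, mab_middle_node()
 * would trigger the 3-way case with split == 10 and mid_split == 20.
 */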
1957
1958 /*
1959 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1960 * and set @b_node->b_end to the next free slot.
1961 * @mas: The maple state
1962 * @mas_start: The starting slot to copy
1963 * @mas_end: The end slot to copy (inclusively)
1964 * @b_node: The maple_big_node to place the data
1965 * @mab_start: The starting location in maple_big_node to store the data.
1966 */
1967 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1968 unsigned char mas_end, struct maple_big_node *b_node,
1969 unsigned char mab_start)
1970 {
1971 enum maple_type mt;
1972 struct maple_node *node;
1973 void __rcu **slots;
1974 unsigned long *pivots, *gaps;
1975 int i = mas_start, j = mab_start;
1976 unsigned char piv_end;
1977
1978 node = mas_mn(mas);
1979 mt = mte_node_type(mas->node);
1980 pivots = ma_pivots(node, mt);
1981 if (!i) {
1982 b_node->pivot[j] = pivots[i++];
1983 if (unlikely(i > mas_end))
1984 goto complete;
1985 j++;
1986 }
1987
1988 piv_end = min(mas_end, mt_pivots[mt]);
1989 for (; i < piv_end; i++, j++) {
1990 b_node->pivot[j] = pivots[i];
1991 if (unlikely(!b_node->pivot[j]))
1992 break;
1993
1994 if (unlikely(mas->max == b_node->pivot[j]))
1995 goto complete;
1996 }
1997
1998 if (likely(i <= mas_end))
1999 b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
2000
2001 complete:
2002 b_node->b_end = ++j;
2003 j -= mab_start;
2004 slots = ma_slots(node, mt);
2005 memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
2006 if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
2007 gaps = ma_gaps(node, mt);
2008 memcpy(b_node->gap + mab_start, gaps + mas_start,
2009 sizeof(unsigned long) * j);
2010 }
2011 }
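
/*
 * Typical use of mas_mab_cp() (illustrative sketch; b_node is a local
 * maple_big_node and node_end is a hypothetical name for the last offset of
 * the current node):
 *
 *	struct maple_big_node b_node;
 *
 *	memset(&b_node, 0, sizeof(b_node));
 *	b_node.type = mte_node_type(mas->node);
 *	mas_mab_cp(mas, 0, node_end, &b_node, 0);
 *
 * After the call, b_node.b_end is one past the last offset written, so the
 * big node holds node_end + 1 entries starting at offset 0.
 */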
2012
2013 /*
2014 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
2015 * @mas: The maple state
2016 * @node: The maple node
2017 * @pivots: pointer to the maple node pivots
2018 * @mt: The maple type
2019 * @end: The assumed end
2020 *
2021 * Note, end may be incremented within this function but not modified at the
2022 * source. This is fine since the metadata is the last thing to be stored in a
2023 * node during a write.
2024 */
2025 static inline void mas_leaf_set_meta(struct ma_state *mas,
2026 struct maple_node *node, unsigned long *pivots,
2027 enum maple_type mt, unsigned char end)
2028 {
	/* There is already no room for metadata */
2030 if (mt_pivots[mt] <= end)
2031 return;
2032
2033 if (pivots[end] && pivots[end] < mas->max)
2034 end++;
2035
2036 if (end < mt_slots[mt] - 1)
2037 ma_set_meta(node, mt, 0, end);
2038 }
2039
/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusive)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update the maple state max from the pivots in @b_node
 */
2047 static inline void mab_mas_cp(struct maple_big_node *b_node,
2048 unsigned char mab_start, unsigned char mab_end,
2049 struct ma_state *mas, bool new_max)
2050 {
2051 int i, j = 0;
2052 enum maple_type mt = mte_node_type(mas->node);
2053 struct maple_node *node = mte_to_node(mas->node);
2054 void __rcu **slots = ma_slots(node, mt);
2055 unsigned long *pivots = ma_pivots(node, mt);
2056 unsigned long *gaps = NULL;
2057 unsigned char end;
2058
2059 if (mab_end - mab_start > mt_pivots[mt])
2060 mab_end--;
2061
2062 if (!pivots[mt_pivots[mt] - 1])
2063 slots[mt_pivots[mt]] = NULL;
2064
2065 i = mab_start;
2066 do {
2067 pivots[j++] = b_node->pivot[i++];
2068 } while (i <= mab_end && likely(b_node->pivot[i]));
2069
2070 memcpy(slots, b_node->slot + mab_start,
2071 sizeof(void *) * (i - mab_start));
2072
2073 if (new_max)
2074 mas->max = b_node->pivot[i - 1];
2075
2076 end = j - 1;
2077 if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2078 unsigned long max_gap = 0;
2079 unsigned char offset = 15;
2080
2081 gaps = ma_gaps(node, mt);
2082 do {
2083 gaps[--j] = b_node->gap[--i];
2084 if (gaps[j] > max_gap) {
2085 offset = j;
2086 max_gap = gaps[j];
2087 }
2088 } while (j);
2089
2090 ma_set_meta(node, mt, offset, end);
2091 } else {
2092 mas_leaf_set_meta(mas, node, pivots, mt, end);
2093 }
2094 }
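
/*
 * The inverse copy (illustrative sketch): once the big node has been
 * assembled and a fresh node placed in the maple state, the data is written
 * back out with
 *
 *	mab_mas_cp(&b_node, 0, b_node.b_end, mas, true);
 *
 * Passing new_max == true updates mas->max from the last pivot copied, which
 * is wanted when the new node does not span the same range as the old one;
 * callers that keep the old range pass false.
 */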
2095
/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children that do not have the correct
 * parents set.  Follow the nodes which do have the correct parents, as they
 * are the new entries which need to be walked to find other incorrectly set
 * parents.
 */
2105 static inline void mas_descend_adopt(struct ma_state *mas)
2106 {
2107 struct ma_state list[3], next[3];
2108 int i, n;
2109
	/*
	 * At each level there may be up to 3 correct parent pointers, which
	 * indicate the new nodes that need to be walked to find any new nodes
	 * at a lower level.
	 */
2114
2115 for (i = 0; i < 3; i++) {
2116 list[i] = *mas;
2117 list[i].offset = 0;
2118 next[i].offset = 0;
2119 }
2120 next[0] = *mas;
2121
2122 while (!mte_is_leaf(list[0].node)) {
2123 n = 0;
2124 for (i = 0; i < 3; i++) {
2125 if (mas_is_none(&list[i]))
2126 continue;
2127
2128 if (i && list[i-1].node == list[i].node)
2129 continue;
2130
2131 while ((n < 3) && (mas_new_child(&list[i], &next[n])))
2132 n++;
2133
2134 mas_adopt_children(&list[i], list[i].node);
2135 }
2136
2137 while (n < 3)
2138 next[n++].node = MAS_NONE;
2139
2140 /* descend by setting the list to the children */
2141 for (i = 0; i < 3; i++)
2142 list[i] = next[i];
2143 }
2144 }
2145
2146 /*
2147 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2148 * @mas: The maple state
2149 * @end: The maple node end
2150 * @mt: The maple node type
2151 */
2152 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2153 enum maple_type mt)
2154 {
2155 if (!(mas->mas_flags & MA_STATE_BULK))
2156 return;
2157
2158 if (mte_is_root(mas->node))
2159 return;
2160
2161 if (end > mt_min_slots[mt]) {
2162 mas->mas_flags &= ~MA_STATE_REBALANCE;
2163 return;
2164 }
2165 }
2166
2167 /*
2168 * mas_store_b_node() - Store an @entry into the b_node while also copying the
2169 * data from a maple encoded node.
2170 * @wr_mas: the maple write state
2171 * @b_node: the maple_big_node to fill with data
2172 * @offset_end: the offset to end copying
2173 *
2174 * Return: The actual end of the data stored in @b_node
2175 */
2176 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2177 struct maple_big_node *b_node, unsigned char offset_end)
2178 {
2179 unsigned char slot;
2180 unsigned char b_end;
2181 /* Possible underflow of piv will wrap back to 0 before use. */
2182 unsigned long piv;
2183 struct ma_state *mas = wr_mas->mas;
2184
2185 b_node->type = wr_mas->type;
2186 b_end = 0;
2187 slot = mas->offset;
2188 if (slot) {
2189 /* Copy start data up to insert. */
2190 mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2191 b_end = b_node->b_end;
2192 piv = b_node->pivot[b_end - 1];
2193 } else
2194 piv = mas->min - 1;
2195
2196 if (piv + 1 < mas->index) {
2197 /* Handle range starting after old range */
2198 b_node->slot[b_end] = wr_mas->content;
2199 if (!wr_mas->content)
2200 b_node->gap[b_end] = mas->index - 1 - piv;
2201 b_node->pivot[b_end++] = mas->index - 1;
2202 }
2203
2204 /* Store the new entry. */
2205 mas->offset = b_end;
2206 b_node->slot[b_end] = wr_mas->entry;
2207 b_node->pivot[b_end] = mas->last;
2208
2209 /* Appended. */
2210 if (mas->last >= mas->max)
2211 goto b_end;
2212
2213 /* Handle new range ending before old range ends */
2214 piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2215 if (piv > mas->last) {
2216 if (piv == ULONG_MAX)
2217 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2218
2219 if (offset_end != slot)
2220 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2221 offset_end);
2222
2223 b_node->slot[++b_end] = wr_mas->content;
2224 if (!wr_mas->content)
2225 b_node->gap[b_end] = piv - mas->last + 1;
2226 b_node->pivot[b_end] = piv;
2227 }
2228
2229 slot = offset_end + 1;
2230 if (slot > wr_mas->node_end)
2231 goto b_end;
2232
2233 /* Copy end data to the end of the node. */
2234 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2235 b_node->b_end--;
2236 return;
2237
2238 b_end:
2239 b_node->b_end = b_end;
2240 }
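
/*
 * Worked example of mas_store_b_node() (illustrative sketch): take a leaf
 * with three ranges and store D over [12, 17]:
 *
 *	before:  [0, 9] = A, [10, 19] = B, [20, 29] = C
 *
 * With mas->offset == offset_end == 1, the big node is assembled as
 *
 *	[0, 9]   = A	copied start data
 *	[10, 11] = B	old content ahead of the new range
 *	[12, 17] = D	the new entry
 *	[18, 19] = B	old content after the new range
 *	[20, 29] = C	copied end data
 *
 * A store that starts or ends exactly on the old pivots simply omits the B
 * fragments.
 */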
2241
2242 /*
2243 * mas_prev_sibling() - Find the previous node with the same parent.
2244 * @mas: the maple state
2245 *
2246 * Return: True if there is a previous sibling, false otherwise.
2247 */
2248 static inline bool mas_prev_sibling(struct ma_state *mas)
2249 {
2250 unsigned int p_slot = mte_parent_slot(mas->node);
2251
2252 if (mte_is_root(mas->node))
2253 return false;
2254
2255 if (!p_slot)
2256 return false;
2257
2258 mas_ascend(mas);
2259 mas->offset = p_slot - 1;
2260 mas_descend(mas);
2261 return true;
2262 }
2263
2264 /*
2265 * mas_next_sibling() - Find the next node with the same parent.
2266 * @mas: the maple state
2267 *
2268 * Return: true if there is a next sibling, false otherwise.
2269 */
2270 static inline bool mas_next_sibling(struct ma_state *mas)
2271 {
2272 MA_STATE(parent, mas->tree, mas->index, mas->last);
2273
2274 if (mte_is_root(mas->node))
2275 return false;
2276
2277 parent = *mas;
2278 mas_ascend(&parent);
2279 parent.offset = mte_parent_slot(mas->node) + 1;
2280 if (parent.offset > mas_data_end(&parent))
2281 return false;
2282
2283 *mas = parent;
2284 mas_descend(mas);
2285 return true;
2286 }
2287
/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
2296 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2297 {
2298 if (enode)
2299 return enode;
2300
2301 return ma_enode_ptr(MAS_NONE);
2302 }
2303
2304 /*
2305 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2306 * @wr_mas: The maple write state
2307 *
2308 * Uses mas_slot_locked() and does not need to worry about dead nodes.
2309 */
2310 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2311 {
2312 struct ma_state *mas = wr_mas->mas;
2313 unsigned char count, offset;
2314
2315 if (unlikely(ma_is_dense(wr_mas->type))) {
2316 wr_mas->r_max = wr_mas->r_min = mas->index;
2317 mas->offset = mas->index = mas->min;
2318 return;
2319 }
2320
2321 wr_mas->node = mas_mn(wr_mas->mas);
2322 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2323 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2324 wr_mas->pivots, mas->max);
2325 offset = mas->offset;
2326
2327 while (offset < count && mas->index > wr_mas->pivots[offset])
2328 offset++;
2329
2330 wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2331 wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2332 wr_mas->offset_end = mas->offset = offset;
2333 }
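
/*
 * Worked example of mas_wr_node_walk() (illustrative sketch): consider a node
 * with node max 49 and stored pivots {9, 19, 29}, the rest implied.  For
 * mas->index == 25 the walk stops at offset 2 since 29 >= 25, giving
 * wr_mas->r_min == 20 and wr_mas->r_max == 29: the range of the slot that
 * contains the index.  For mas->index == 40 the walk runs off the stored
 * pivots, so r_min becomes 30 and r_max becomes mas->max (49).
 */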
2334
/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot (inclusive)
 * @end: The end slot (inclusive)
 */
2342 static inline void mas_topiary_range(struct ma_state *mas,
2343 struct ma_topiary *destroy, unsigned char start, unsigned char end)
2344 {
2345 void __rcu **slots;
2346 unsigned char offset;
2347
2348 MAS_BUG_ON(mas, mte_is_leaf(mas->node));
2349
2350 slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
2351 for (offset = start; offset <= end; offset++) {
2352 struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
2353
2354 if (mte_dead_node(enode))
2355 continue;
2356
2357 mat_add(destroy, enode);
2358 }
2359 }
2360
2361 /*
2362 * mast_topiary() - Add the portions of the tree to the removal list; either to
2363 * be freed or discarded (destroy walk).
2364 * @mast: The maple_subtree_state.
2365 */
2366 static inline void mast_topiary(struct maple_subtree_state *mast)
2367 {
2368 MA_WR_STATE(wr_mas, mast->orig_l, NULL);
2369 unsigned char r_start, r_end;
2370 unsigned char l_start, l_end;
2371 void __rcu **l_slots, **r_slots;
2372
2373 wr_mas.type = mte_node_type(mast->orig_l->node);
2374 mast->orig_l->index = mast->orig_l->last;
2375 mas_wr_node_walk(&wr_mas);
2376 l_start = mast->orig_l->offset + 1;
2377 l_end = mas_data_end(mast->orig_l);
2378 r_start = 0;
2379 r_end = mast->orig_r->offset;
2380
2381 if (r_end)
2382 r_end--;
2383
2384 l_slots = ma_slots(mas_mn(mast->orig_l),
2385 mte_node_type(mast->orig_l->node));
2386
2387 r_slots = ma_slots(mas_mn(mast->orig_r),
2388 mte_node_type(mast->orig_r->node));
2389
2390 if ((l_start < l_end) &&
2391 mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
2392 l_start++;
2393 }
2394
2395 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
2396 if (r_end)
2397 r_end--;
2398 }
2399
2400 if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
2401 return;
2402
2403 /* At the node where left and right sides meet, add the parts between */
2404 if (mast->orig_l->node == mast->orig_r->node) {
2405 return mas_topiary_range(mast->orig_l, mast->destroy,
2406 l_start, r_end);
2407 }
2408
2409 /* mast->orig_r is different and consumed. */
2410 if (mte_is_leaf(mast->orig_r->node))
2411 return;
2412
2413 if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

2417 if (l_start <= l_end)
2418 mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
2419
2420 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
2421 r_start++;
2422
2423 if (r_start <= r_end)
2424 mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2425 }
2426
/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
2432 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2433 {
2434 unsigned char b_end = mast->bn->b_end;
2435
2436 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2437 mast->bn, b_end);
2438 mast->orig_r->last = mast->orig_r->max;
2439 }
2440
/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
2446 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2447 {
2448 unsigned char end = mas_data_end(mast->orig_l) + 1;
2449 unsigned char b_end = mast->bn->b_end;
2450
2451 mab_shift_right(mast->bn, end);
2452 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2453 mast->l->min = mast->orig_l->min;
2454 mast->orig_l->index = mast->orig_l->min;
2455 mast->bn->b_end = end + b_end;
2456 mast->l->offset += end;
2457 }
2458
/*
 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
 * the node to the right.  Check the nodes to the right, then to the left, at
 * each level upwards until the root is reached.  Free and destroy as needed.
 * Data is copied into the @mast->bn.
 * @mast: The maple_subtree_state.
 */
2466 static inline
2467 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2468 {
	struct ma_state r_tmp = *mast->orig_r;
	struct ma_state l_tmp = *mast->orig_l;
	struct maple_enode *ancestor = NULL;
	unsigned char start, end;
	unsigned char depth = 0;

2477 do {
2478 mas_ascend(mast->orig_r);
2479 mas_ascend(mast->orig_l);
2480 depth++;
2481 if (!ancestor &&
2482 (mast->orig_r->node == mast->orig_l->node)) {
2483 ancestor = mast->orig_r->node;
2484 end = mast->orig_r->offset - 1;
2485 start = mast->orig_l->offset + 1;
2486 }
2487
2488 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2489 if (!ancestor) {
2490 ancestor = mast->orig_r->node;
2491 start = 0;
2492 }
2493
2494 mast->orig_r->offset++;
2495 do {
2496 mas_descend(mast->orig_r);
2497 mast->orig_r->offset = 0;
2498 depth--;
2499 } while (depth);
2500
2501 mast_rebalance_next(mast);
2502 do {
2503 unsigned char l_off = 0;
2504 struct maple_enode *child = r_tmp.node;
2505
2506 mas_ascend(&r_tmp);
2507 if (ancestor == r_tmp.node)
2508 l_off = start;
2509
2510 if (r_tmp.offset)
2511 r_tmp.offset--;
2512
2513 if (l_off < r_tmp.offset)
2514 mas_topiary_range(&r_tmp, mast->destroy,
2515 l_off, r_tmp.offset);
2516
2517 if (l_tmp.node != child)
2518 mat_add(mast->free, child);
2519
2520 } while (r_tmp.node != ancestor);
2521
2522 *mast->orig_l = l_tmp;
2523 return true;
2524
2525 } else if (mast->orig_l->offset != 0) {
2526 if (!ancestor) {
2527 ancestor = mast->orig_l->node;
2528 end = mas_data_end(mast->orig_l);
2529 }
2530
2531 mast->orig_l->offset--;
2532 do {
2533 mas_descend(mast->orig_l);
2534 mast->orig_l->offset =
2535 mas_data_end(mast->orig_l);
2536 depth--;
2537 } while (depth);
2538
2539 mast_rebalance_prev(mast);
2540 do {
2541 unsigned char r_off;
2542 struct maple_enode *child = l_tmp.node;
2543
2544 mas_ascend(&l_tmp);
2545 if (ancestor == l_tmp.node)
2546 r_off = end;
2547 else
2548 r_off = mas_data_end(&l_tmp);
2549
2550 if (l_tmp.offset < r_off)
2551 l_tmp.offset++;
2552
2553 if (l_tmp.offset < r_off)
2554 mas_topiary_range(&l_tmp, mast->destroy,
2555 l_tmp.offset, r_off);
2556
2557 if (r_tmp.node != child)
2558 mat_add(mast->free, child);
2559
2560 } while (l_tmp.node != ancestor);
2561
2562 *mast->orig_r = r_tmp;
2563 return true;
2564 }
2565 } while (!mte_is_root(mast->orig_r->node));
2566
2567 *mast->orig_r = r_tmp;
2568 *mast->orig_l = l_tmp;
2569 return false;
2570 }
2571
2572 /*
2573 * mast_ascend_free() - Add current original maple state nodes to the free list
2574 * and ascend.
2575 * @mast: the maple subtree state.
2576 *
2577 * Ascend the original left and right sides and add the previous nodes to the
2578 * free list. Set the slots to point to the correct location in the new nodes.
2579 */
2580 static inline void
2581 mast_ascend_free(struct maple_subtree_state *mast)
2582 {
2583 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2584 struct maple_enode *left = mast->orig_l->node;
2585 struct maple_enode *right = mast->orig_r->node;
2586
2587 mas_ascend(mast->orig_l);
2588 mas_ascend(mast->orig_r);
2589 mat_add(mast->free, left);
2590
2591 if (left != right)
2592 mat_add(mast->free, right);
2593
2594 mast->orig_r->offset = 0;
2595 mast->orig_r->index = mast->r->max;
2596 /* last should be larger than or equal to index */
2597 if (mast->orig_r->last < mast->orig_r->index)
2598 mast->orig_r->last = mast->orig_r->index;
	/*
	 * The node may not contain the value so set the slot to ensure all
	 * of the node's contents are freed or destroyed.
	 */
2603 wr_mas.type = mte_node_type(mast->orig_r->node);
2604 mas_wr_node_walk(&wr_mas);
2605 /* Set up the left side of things */
2606 mast->orig_l->offset = 0;
2607 mast->orig_l->index = mast->l->min;
2608 wr_mas.mas = mast->orig_l;
2609 wr_mas.type = mte_node_type(mast->orig_l->node);
2610 mas_wr_node_walk(&wr_mas);
2611
2612 mast->bn->type = wr_mas.type;
2613 }
2614
2615 /*
2616 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2617 * @mas: the maple state with the allocations.
2618 * @b_node: the maple_big_node with the type encoding.
2619 *
2620 * Use the node type from the maple_big_node to allocate a new node from the
2621 * ma_state. This function exists mainly for code readability.
2622 *
2623 * Return: A new maple encoded node
2624 */
2625 static inline struct maple_enode
2626 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2627 {
2628 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2629 }
2630
/*
 * mas_mab_to_node() - Set up right and middle nodes
 *
 * @mas: the maple state that contains the allocations.
 * @b_node: the node which contains the data.
 * @left: The pointer which will have the left node
 * @right: The pointer which may have the right node
 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum of the range being stored
 *
 * Return: the split of left.
 */
2643 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2644 struct maple_big_node *b_node, struct maple_enode **left,
2645 struct maple_enode **right, struct maple_enode **middle,
2646 unsigned char *mid_split, unsigned long min)
2647 {
2648 unsigned char split = 0;
2649 unsigned char slot_count = mt_slots[b_node->type];
2650
2651 *left = mas_new_ma_node(mas, b_node);
2652 *right = NULL;
2653 *middle = NULL;
2654 *mid_split = 0;
2655
2656 if (b_node->b_end < slot_count) {
2657 split = b_node->b_end;
2658 } else {
2659 split = mab_calc_split(mas, b_node, mid_split, min);
2660 *right = mas_new_ma_node(mas, b_node);
2661 }
2662
2663 if (*mid_split)
2664 *middle = mas_new_ma_node(mas, b_node);
2665
	return split;
}
2669
/*
 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
 * pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add, if NULL nothing happens.
 */
2677 static inline void mab_set_b_end(struct maple_big_node *b_node,
2678 struct ma_state *mas,
2679 void *entry)
2680 {
2681 if (!entry)
2682 return;
2683
2684 b_node->slot[b_node->b_end] = entry;
2685 if (mt_is_alloc(mas->tree))
2686 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2687 b_node->pivot[b_node->b_end++] = mas->max;
2688 }
2689
/*
 * mas_set_split_parent() - combine_then_separate helper function.  Sets the
 * parent of @mas->node to either @left or @right, depending on @slot and
 * @split.
 *
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed
 * @split: the split location between @left and @right
 */
2700 static inline void mas_set_split_parent(struct ma_state *mas,
2701 struct maple_enode *left,
2702 struct maple_enode *right,
2703 unsigned char *slot, unsigned char split)
2704 {
2705 if (mas_is_none(mas))
2706 return;
2707
2708 if ((*slot) <= split)
2709 mas_set_parent(mas, mas->node, left, *slot);
2710 else if (right)
2711 mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2712
2713 (*slot)++;
2714 }
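
/*
 * Worked example of mas_set_split_parent() (illustrative sketch): with
 * split == 7, a child recorded at slot 5 is parented to @left at offset 5,
 * while a child at slot 10 is parented to @right at offset 10 - 7 - 1 == 2.
 * The slot counter is advanced either way, so the caller can chain the three
 * maple states through the same counter.
 */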
2715
/*
 * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right of the mid-split.
 * @slot: The offset.
 * @split: Pointer to the split location.
 * @mid_split: The middle split.
 */
2725 static inline void mte_mid_split_check(struct maple_enode **l,
2726 struct maple_enode **r,
2727 struct maple_enode *right,
2728 unsigned char slot,
2729 unsigned char *split,
2730 unsigned char mid_split)
2731 {
2732 if (*r == right)
2733 return;
2734
2735 if (slot < mid_split)
2736 return;
2737
2738 *l = *r;
2739 *r = right;
2740 *split = mid_split;
2741 }
2742
/*
 * mast_set_split_parents() - Helper function to set three nodes' parents.
 * The slot is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node
 * @right: the right node
 * @split: the split location between @left and @middle
 * @mid_split: the split location between @middle and @right
 */
2751 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2752 struct maple_enode *left,
2753 struct maple_enode *middle,
2754 struct maple_enode *right,
2755 unsigned char split,
2756 unsigned char mid_split)
2757 {
2758 unsigned char slot;
2759 struct maple_enode *l = left;
2760 struct maple_enode *r = right;
2761
2762 if (mas_is_none(mast->l))
2763 return;
2764
2765 if (middle)
2766 r = middle;
2767
2768 slot = mast->l->offset;
2769
2770 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2771 mas_set_split_parent(mast->l, l, r, &slot, split);
2772
2773 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2774 mas_set_split_parent(mast->m, l, r, &slot, split);
2775
2776 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2777 mas_set_split_parent(mast->r, l, r, &slot, split);
2778 }
2779
2780 /*
2781 * mas_wmb_replace() - Write memory barrier and replace
2782 * @mas: The maple state
2783 * @free: the maple topiary list of nodes to free
2784 * @destroy: The maple topiary list of nodes to destroy (walk and free)
2785 *
2786 * Updates gap as necessary.
2787 */
2788 static inline void mas_wmb_replace(struct ma_state *mas,
2789 struct ma_topiary *free,
2790 struct ma_topiary *destroy)
2791 {
2792 /* All nodes must see old data as dead prior to replacing that data */
2793 smp_wmb(); /* Needed for RCU */
2794
2795 /* Insert the new data in the tree */
2796 mas_replace(mas, true);
2797
2798 if (!mte_is_leaf(mas->node))
2799 mas_descend_adopt(mas);
2800
2801 mas_mat_free(mas, free);
2802
2803 if (destroy)
2804 mas_mat_destroy(mas, destroy);
2805
2806 if (mte_is_leaf(mas->node))
2807 return;
2808
2809 mas_update_gap(mas);
2810 }
2811
2812 /*
2813 * mast_new_root() - Set a new tree root during subtree creation
2814 * @mast: The maple subtree state
2815 * @mas: The maple state
2816 */
2817 static inline void mast_new_root(struct maple_subtree_state *mast,
2818 struct ma_state *mas)
2819 {
2820 mas_mn(mast->l)->parent =
2821 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2822 if (!mte_dead_node(mast->orig_l->node) &&
2823 !mte_is_root(mast->orig_l->node)) {
2824 do {
2825 mast_ascend_free(mast);
2826 mast_topiary(mast);
2827 } while (!mte_is_root(mast->orig_l->node));
2828 }
2829 if ((mast->orig_l->node != mas->node) &&
2830 (mast->l->depth > mas_mt_height(mas))) {
2831 mat_add(mast->free, mas->node);
2832 }
2833 }
2834
2835 /*
2836 * mast_cp_to_nodes() - Copy data out to nodes.
2837 * @mast: The maple subtree state
2838 * @left: The left encoded maple node
2839 * @middle: The middle encoded maple node
2840 * @right: The right encoded maple node
2841 * @split: The location to split between left and (middle ? middle : right)
2842 * @mid_split: The location to split between middle and right.
2843 */
2844 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2845 struct maple_enode *left, struct maple_enode *middle,
2846 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2847 {
2848 bool new_lmax = true;
2849
2850 mast->l->node = mte_node_or_none(left);
2851 mast->m->node = mte_node_or_none(middle);
2852 mast->r->node = mte_node_or_none(right);
2853
2854 mast->l->min = mast->orig_l->min;
2855 if (split == mast->bn->b_end) {
2856 mast->l->max = mast->orig_r->max;
2857 new_lmax = false;
2858 }
2859
2860 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2861
2862 if (middle) {
2863 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2864 mast->m->min = mast->bn->pivot[split] + 1;
2865 split = mid_split;
2866 }
2867
2868 mast->r->max = mast->orig_r->max;
2869 if (right) {
2870 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2871 mast->r->min = mast->bn->pivot[split] + 1;
2872 }
2873 }
2874
/*
 * mast_combine_cp_left() - Copy in the original left side of the tree into the
 * combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
2880 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2881 {
2882 unsigned char l_slot = mast->orig_l->offset;
2883
2884 if (!l_slot)
2885 return;
2886
2887 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2888 }
2889
/*
 * mast_combine_cp_right() - Copy in the original right side of the tree into
 * the combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
2895 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2896 {
2897 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2898 return;
2899
2900 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2901 mt_slot_count(mast->orig_r->node), mast->bn,
2902 mast->bn->b_end);
2903 mast->orig_r->last = mast->orig_r->max;
2904 }
2905
/*
 * mast_sufficient() - Check if the maple subtree state has enough data in the
 * big node to create at least one sufficient node.
 * @mast: the maple subtree state
 *
 * Return: true if sufficient, false otherwise.
 */
2911 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2912 {
2913 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2914 return true;
2915
2916 return false;
2917 }
2918
/*
 * mast_overflow() - Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 *
 * Return: true if the data overflows a single node, false otherwise.
 */
2924 static inline bool mast_overflow(struct maple_subtree_state *mast)
2925 {
2926 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2927 return true;
2928
2929 return false;
2930 }
2931
2932 static inline void *mtree_range_walk(struct ma_state *mas)
2933 {
2934 unsigned long *pivots;
2935 unsigned char offset;
2936 struct maple_node *node;
2937 struct maple_enode *next, *last;
2938 enum maple_type type;
2939 void __rcu **slots;
2940 unsigned char end;
2941 unsigned long max, min;
2942 unsigned long prev_max, prev_min;
2943
2944 next = mas->node;
2945 min = mas->min;
2946 max = mas->max;
2947 do {
2948 offset = 0;
2949 last = next;
2950 node = mte_to_node(next);
2951 type = mte_node_type(next);
2952 pivots = ma_pivots(node, type);
2953 end = ma_data_end(node, type, pivots, max);
2954 if (unlikely(ma_dead_node(node)))
2955 goto dead_node;
2956
2957 if (pivots[offset] >= mas->index) {
2958 prev_max = max;
2959 prev_min = min;
2960 max = pivots[offset];
2961 goto next;
2962 }
2963
2964 do {
2965 offset++;
2966 } while ((offset < end) && (pivots[offset] < mas->index));
2967
2968 prev_min = min;
2969 min = pivots[offset - 1] + 1;
2970 prev_max = max;
2971 if (likely(offset < end && pivots[offset]))
2972 max = pivots[offset];
2973
2974 next:
2975 slots = ma_slots(node, type);
2976 next = mt_slot(mas->tree, slots, offset);
2977 if (unlikely(ma_dead_node(node)))
2978 goto dead_node;
2979 } while (!ma_is_leaf(type));
2980
2981 mas->offset = offset;
2982 mas->index = min;
2983 mas->last = max;
2984 mas->min = prev_min;
2985 mas->max = prev_max;
2986 mas->node = last;
2987 return (void *)next;
2988
2989 dead_node:
2990 mas_reset(mas);
2991 return NULL;
2992 }
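
/*
 * Worked example of mtree_range_walk() (illustrative sketch): consider a
 * two-level tree whose root has pivots {99, 199} and whose second child is a
 * leaf covering [100, 199] with pivots {119, 149, 169}.  Walking for
 * mas->index == 150 descends at root offset 1, then stops at leaf offset 2,
 * returning the entry for [150, 169] with mas->index == 150,
 * mas->last == 169, and mas->min/mas->max set to the leaf's range of
 * [100, 199] so the state can be resumed cheaply.
 */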
2993
2994 /*
2995 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2996 * @mas: The starting maple state
2997 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2998 * @count: The estimated count of iterations needed.
2999 *
3000 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
3001 * is hit. First @b_node is split into two entries which are inserted into the
3002 * next iteration of the loop. @b_node is returned populated with the final
 * iteration.  @mas is used to obtain allocations.  orig_l_mas keeps track of
 * the nodes that will remain active by using orig_l_mas->index and
 * orig_l_mas->last to account for what has been copied into the new sub-tree.
 * The update of orig_l_mas->last is used in mas_consume to find the slots that
 * will need to be either freed or destroyed.  orig_l_mas->depth keeps track of
 * the height of the new sub-tree in case the sub-tree becomes the full tree.
3009 *
3010 * Return: the number of elements in b_node during the last loop.
3011 */
3012 static int mas_spanning_rebalance(struct ma_state *mas,
3013 struct maple_subtree_state *mast, unsigned char count)
3014 {
3015 unsigned char split, mid_split;
3016 unsigned char slot = 0;
3017 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
3018
3019 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
3020 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3021 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
3022 MA_TOPIARY(free, mas->tree);
3023 MA_TOPIARY(destroy, mas->tree);
3024
	/*
	 * The tree needs to be rebalanced and leaves need to be kept at the
	 * same level.  Rebalancing is done by use of the ``struct ma_topiary``.
	 */
3029 mast->l = &l_mas;
3030 mast->m = &m_mas;
3031 mast->r = &r_mas;
3032 mast->free = &free;
3033 mast->destroy = &destroy;
3034 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3035
	/* If this is not root and the data is insufficient, rebalance into neighbours. */
3037 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3038 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3039 mast_spanning_rebalance(mast);
3040
3041 mast->orig_l->depth = 0;
3042
3043 /*
3044 * Each level of the tree is examined and balanced, pushing data to the left or
3045 * right, or rebalancing against left or right nodes is employed to avoid
3046 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3047 * the tree is created, there may be a mix of new and old nodes. The old nodes
3048 * will have the incorrect parent pointers and currently be in two trees: the
3049 * original tree and the partially new tree. To remedy the parent pointers in
3050 * the old tree, the new data is swapped into the active tree and a walk down
3051 * the tree is performed and the parent pointers are updated.
 * See mas_descend_adopt() for more information.
3053 */
3054 while (count--) {
3055 mast->bn->b_end--;
3056 mast->bn->type = mte_node_type(mast->orig_l->node);
3057 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3058 &mid_split, mast->orig_l->min);
3059 mast_set_split_parents(mast, left, middle, right, split,
3060 mid_split);
3061 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3062
3063 /*
3064 * Copy data from next level in the tree to mast->bn from next
3065 * iteration
3066 */
3067 memset(mast->bn, 0, sizeof(struct maple_big_node));
3068 mast->bn->type = mte_node_type(left);
3069 mast->orig_l->depth++;
3070
3071 /* Root already stored in l->node. */
3072 if (mas_is_root_limits(mast->l))
3073 goto new_root;
3074
3075 mast_ascend_free(mast);
3076 mast_combine_cp_left(mast);
3077 l_mas.offset = mast->bn->b_end;
3078 mab_set_b_end(mast->bn, &l_mas, left);
3079 mab_set_b_end(mast->bn, &m_mas, middle);
3080 mab_set_b_end(mast->bn, &r_mas, right);
3081
3082 /* Copy anything necessary out of the right node. */
3083 mast_combine_cp_right(mast);
3084 mast_topiary(mast);
3085 mast->orig_l->last = mast->orig_l->max;
3086
3087 if (mast_sufficient(mast))
3088 continue;
3089
3090 if (mast_overflow(mast))
3091 continue;
3092
3093 /* May be a new root stored in mast->bn */
3094 if (mas_is_root_limits(mast->orig_l))
3095 break;
3096
3097 mast_spanning_rebalance(mast);
3098
3099 /* rebalancing from other nodes may require another loop. */
3100 if (!count)
3101 count++;
3102 }
3103
3104 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3105 mte_node_type(mast->orig_l->node));
3106 mast->orig_l->depth++;
3107 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3108 mas_set_parent(mas, left, l_mas.node, slot);
3109 if (middle)
3110 mas_set_parent(mas, middle, l_mas.node, ++slot);
3111
3112 if (right)
3113 mas_set_parent(mas, right, l_mas.node, ++slot);
3114
3115 if (mas_is_root_limits(mast->l)) {
3116 new_root:
3117 mast_new_root(mast, mas);
3118 } else {
3119 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3120 }
3121
3122 if (!mte_dead_node(mast->orig_l->node))
3123 mat_add(&free, mast->orig_l->node);
3124
3125 mas->depth = mast->orig_l->depth;
3126 *mast->orig_l = l_mas;
3127 mte_set_node_dead(mas->node);
3128
3129 /* Set up mas for insertion. */
3130 mast->orig_l->depth = mas->depth;
3131 mast->orig_l->alloc = mas->alloc;
3132 *mas = *mast->orig_l;
3133 mas_wmb_replace(mas, &free, &destroy);
3134 mtree_range_walk(mas);
3135 return mast->bn->b_end;
3136 }
3137
3138 /*
3139 * mas_rebalance() - Rebalance a given node.
3140 * @mas: The maple state
3141 * @b_node: The big maple node.
3142 *
3143 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3144 * Continue upwards until tree is sufficient.
3145 *
3146 * Return: the number of elements in b_node during the last loop.
3147 */
3148 static inline int mas_rebalance(struct ma_state *mas,
3149 struct maple_big_node *b_node)
3150 {
3151 char empty_count = mas_mt_height(mas);
3152 struct maple_subtree_state mast;
3153 unsigned char shift, b_end = ++b_node->b_end;
3154
3155 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3156 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3157
3158 trace_ma_op(__func__, mas);
3159
3160 /*
3161 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3162 * against the node to the right if it exists, otherwise the node to the
3163 * left of this node is rebalanced against this node. If rebalancing
3164 * causes just one node to be produced instead of two, then the parent
3165 * is also examined and rebalanced if it is insufficient. Every level
3166 * tries to combine the data in the same way. If one node contains the
3167 * entire range of the tree, then that node is used as a new root node.
3168 */
3169 mas_node_count(mas, 1 + empty_count * 3);
3170 if (mas_is_err(mas))
3171 return 0;
3172
3173 mast.orig_l = &l_mas;
3174 mast.orig_r = &r_mas;
3175 mast.bn = b_node;
3176 mast.bn->type = mte_node_type(mas->node);
3177
3178 l_mas = r_mas = *mas;
3179
3180 if (mas_next_sibling(&r_mas)) {
3181 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3182 r_mas.last = r_mas.index = r_mas.max;
3183 } else {
3184 mas_prev_sibling(&l_mas);
3185 shift = mas_data_end(&l_mas) + 1;
3186 mab_shift_right(b_node, shift);
3187 mas->offset += shift;
3188 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3189 b_node->b_end = shift + b_end;
3190 l_mas.index = l_mas.last = l_mas.min;
3191 }
3192
3193 return mas_spanning_rebalance(mas, &mast, empty_count);
3194 }
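
/*
 * Sketch of a rebalance (illustrative; assumes mt_min_slots == 6 and
 * mt_slots == 16 for the node type): a leaf is left with 4 entries after an
 * erase, which is insufficient.  If the right sibling holds 9 entries, both
 * nodes are copied into the big node (13 entries), which fits in a single
 * replacement node.  Had the sibling held 14 entries, the combined 18 would
 * be redistributed by mas_spanning_rebalance() into two sufficient nodes
 * (roughly 10 and 8 entries).
 */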
3195
3196 /*
3197 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3198 * state.
3199 * @mas: The maple state
3200 * @end: The end of the left-most node.
3201 *
3202 * During a mass-insert event (such as forking), it may be necessary to
3203 * rebalance the left-most node when it is not sufficient.
3204 */
3205 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3206 {
3207 enum maple_type mt = mte_node_type(mas->node);
3208 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3209 struct maple_enode *eparent;
3210 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3211 void __rcu **l_slots, **slots;
3212 unsigned long *l_pivs, *pivs, gap;
3213 bool in_rcu = mt_in_rcu(mas->tree);
3214
3215 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3216
3217 l_mas = *mas;
3218 mas_prev_sibling(&l_mas);
3219
3220 /* set up node. */
3221 if (in_rcu) {
3222 /* Allocate for both left and right as well as parent. */
3223 mas_node_count(mas, 3);
3224 if (mas_is_err(mas))
3225 return;
3226
3227 newnode = mas_pop_node(mas);
3228 } else {
3229 newnode = &reuse;
3230 }
3231
3232 node = mas_mn(mas);
3233 newnode->parent = node->parent;
3234 slots = ma_slots(newnode, mt);
3235 pivs = ma_pivots(newnode, mt);
3236 left = mas_mn(&l_mas);
3237 l_slots = ma_slots(left, mt);
3238 l_pivs = ma_pivots(left, mt);
3239 if (!l_slots[split])
3240 split++;
3241 tmp = mas_data_end(&l_mas) - split;
3242
3243 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3244 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3245 pivs[tmp] = l_mas.max;
3246 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3247 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3248
3249 l_mas.max = l_pivs[split];
3250 mas->min = l_mas.max + 1;
3251 eparent = mt_mk_node(mte_parent(l_mas.node),
3252 mas_parent_type(&l_mas, l_mas.node));
3253 tmp += end;
3254 if (!in_rcu) {
3255 unsigned char max_p = mt_pivots[mt];
3256 unsigned char max_s = mt_slots[mt];
3257
3258 if (tmp < max_p)
3259 memset(pivs + tmp, 0,
3260 sizeof(unsigned long) * (max_p - tmp));
3261
3262 if (tmp < mt_slots[mt])
3263 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3264
3265 memcpy(node, newnode, sizeof(struct maple_node));
3266 ma_set_meta(node, mt, 0, tmp - 1);
3267 mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3268 l_pivs[split]);
3269
3270 /* Remove data from l_pivs. */
3271 tmp = split + 1;
3272 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3273 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3274 ma_set_meta(left, mt, 0, split);
3275
3276 goto done;
3277 }
3278
3279 /* RCU requires replacing both l_mas, mas, and parent. */
3280 mas->node = mt_mk_node(newnode, mt);
3281 ma_set_meta(newnode, mt, 0, tmp);
3282
3283 new_left = mas_pop_node(mas);
3284 new_left->parent = left->parent;
3285 mt = mte_node_type(l_mas.node);
3286 slots = ma_slots(new_left, mt);
3287 pivs = ma_pivots(new_left, mt);
3288 memcpy(slots, l_slots, sizeof(void *) * split);
3289 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3290 ma_set_meta(new_left, mt, 0, split);
3291 l_mas.node = mt_mk_node(new_left, mt);
3292
3293 /* replace parent. */
3294 offset = mte_parent_slot(mas->node);
3295 mt = mas_parent_type(&l_mas, l_mas.node);
3296 parent = mas_pop_node(mas);
3297 slots = ma_slots(parent, mt);
3298 pivs = ma_pivots(parent, mt);
3299 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3300 rcu_assign_pointer(slots[offset], mas->node);
3301 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3302 pivs[offset - 1] = l_mas.max;
3303 eparent = mt_mk_node(parent, mt);
3304 done:
3305 gap = mas_leaf_max_gap(mas);
3306 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3307 gap = mas_leaf_max_gap(&l_mas);
3308 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3309 mas_ascend(mas);
3310
3311 if (in_rcu)
3312 mas_replace(mas, false);
3313
3314 mas_update_gap(mas);
3315 }
3316
/*
 * mas_split_final_node() - Split the final node in a subtree operation.
 * @mast: the maple subtree state
 * @mas: The maple state
 * @height: The height of the tree in case it's a new root.
 *
 * Return: always true.
 */
3323 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3324 struct ma_state *mas, int height)
3325 {
3326 struct maple_enode *ancestor;
3327
3328 if (mte_is_root(mas->node)) {
3329 if (mt_is_alloc(mas->tree))
3330 mast->bn->type = maple_arange_64;
3331 else
3332 mast->bn->type = maple_range_64;
3333 mas->depth = height;
3334 }
3335 /*
3336 * Only a single node is used here, could be root.
3337 * The Big_node data should just fit in a single node.
3338 */
3339 ancestor = mas_new_ma_node(mas, mast->bn);
3340 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3341 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3342 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3343
3344 mast->l->node = ancestor;
3345 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3346 mas->offset = mast->bn->b_end - 1;
3347 return true;
3348 }
3349
/*
 * mast_fill_bnode() - Copy data into the big node in the subtree state
 * @mast: The maple subtree state
 * @mas: the maple state
 * @skip: The number of entries to skip when copying, leaving room for the new
 * nodes to be inserted.
 */
3356 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3357 struct ma_state *mas,
3358 unsigned char skip)
3359 {
3360 bool cp = true;
3361 struct maple_enode *old = mas->node;
3362 unsigned char split;
3363
3364 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3365 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3366 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3367 mast->bn->b_end = 0;
3368
3369 if (mte_is_root(mas->node)) {
3370 cp = false;
3371 } else {
3372 mas_ascend(mas);
3373 mat_add(mast->free, old);
3374 mas->offset = mte_parent_slot(mas->node);
3375 }
3376
3377 if (cp && mast->l->offset)
3378 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3379
3380 split = mast->bn->b_end;
3381 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3382 mast->r->offset = mast->bn->b_end;
3383 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3384 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3385 cp = false;
3386
3387 if (cp)
3388 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3389 mast->bn, mast->bn->b_end);
3390
3391 mast->bn->b_end--;
3392 mast->bn->type = mte_node_type(mas->node);
3393 }
3394
3395 /*
3396 * mast_split_data() - Split the data in the subtree state big node into regular
3397 * nodes.
3398 * @mast: The maple subtree state
3399 * @mas: The maple state
3400 * @split: The location to split the big node
3401 */
3402 static inline void mast_split_data(struct maple_subtree_state *mast,
3403 struct ma_state *mas, unsigned char split)
3404 {
3405 unsigned char p_slot;
3406
3407 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3408 mte_set_pivot(mast->r->node, 0, mast->r->max);
3409 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3410 mast->l->offset = mte_parent_slot(mas->node);
3411 mast->l->max = mast->bn->pivot[split];
3412 mast->r->min = mast->l->max + 1;
3413 if (mte_is_leaf(mas->node))
3414 return;
3415
3416 p_slot = mast->orig_l->offset;
3417 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3418 &p_slot, split);
3419 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3420 &p_slot, split);
3421 }
3422
3423 /*
3424 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3425 * data to the right or left node if there is room.
3426 * @mas: The maple state
3427 * @height: The current height of the maple state
3428 * @mast: The maple subtree state
3429 * @left: Push left or not.
3430 *
3431 * Keeping the height of the tree low means faster lookups.
3432 *
3433 * Return: True if pushed, false otherwise.
3434 */
3435 static inline bool mas_push_data(struct ma_state *mas, int height,
3436 struct maple_subtree_state *mast, bool left)
3437 {
3438 unsigned char slot_total = mast->bn->b_end;
3439 unsigned char end, space, split;
3440
3441 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3442 tmp_mas = *mas;
3443 tmp_mas.depth = mast->l->depth;
3444
3445 if (left && !mas_prev_sibling(&tmp_mas))
3446 return false;
3447 else if (!left && !mas_next_sibling(&tmp_mas))
3448 return false;
3449
3450 end = mas_data_end(&tmp_mas);
3451 slot_total += end;
3452 space = 2 * mt_slot_count(mas->node) - 2;
3453 /* -2 instead of -1 to ensure there isn't a triple split */
3454 if (ma_is_leaf(mast->bn->type))
3455 space--;
3456
3457 if (mas->max == ULONG_MAX)
3458 space--;
3459
3460 if (slot_total >= space)
3461 return false;
3462
3463 /* Get the data; Fill mast->bn */
3464 mast->bn->b_end++;
3465 if (left) {
3466 mab_shift_right(mast->bn, end + 1);
3467 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3468 mast->bn->b_end = slot_total + 1;
3469 } else {
3470 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3471 }
3472
3473 /* Configure mast for splitting of mast->bn */
3474 split = mt_slots[mast->bn->type] - 2;
3475 if (left) {
3476 /* Switch mas to prev node */
3477 mat_add(mast->free, mas->node);
3478 *mas = tmp_mas;
3479 /* Start using mast->l for the left side. */
3480 tmp_mas.node = mast->l->node;
3481 *mast->l = tmp_mas;
3482 } else {
3483 mat_add(mast->free, tmp_mas.node);
3484 tmp_mas.node = mast->r->node;
3485 *mast->r = tmp_mas;
3486 split = slot_total - split;
3487 }
3488 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3489 /* Update parent slot for split calculation. */
3490 if (left)
3491 mast->orig_l->offset += end + 1;
3492
3493 mast_split_data(mast, mas, split);
3494 mast_fill_bnode(mast, mas, 2);
3495 mas_split_final_node(mast, mas, height + 1);
3496 return true;
3497 }
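
/*
 * Space check arithmetic in mas_push_data() (illustrative sketch; assumes
 * leaf nodes with mt_slot_count == 16): space = 2 * 16 - 2 = 30, less one for
 * a leaf (29) and one more when mas->max == ULONG_MAX (28).  If the
 * overflowing big node holds 17 entries and the sibling's end offset is 10,
 * then slot_total = 17 + 10 = 27 < 29 and the data is pushed rather than
 * split, halting the ripple up the tree.  The split is then pinned near a
 * full node (mt_slots - 2) so the pushed-into node takes the surplus.
 */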
3498
3499 /*
3500 * mas_split() - Split data that is too big for one node into two.
3501 * @mas: The maple state
3502 * @b_node: The maple big node
3503 * Return: 1 on success, 0 on failure.
3504 */
3505 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3506 {
3507 struct maple_subtree_state mast;
3508 int height = 0;
3509 unsigned char mid_split, split = 0;
3510
3511 /*
3512 * Splitting is handled differently from any other B-tree; the Maple
3513 * Tree splits upwards. Splitting up means that the split operation
3514 * occurs when the walk of the tree hits the leaves and not on the way
3515 * down. The reason for splitting up is that it is impossible to know
3516 * how much space will be needed until the leaf is (or leaves are)
3517 * reached. Since overwriting data is allowed and a range could
3518 * overwrite more than one range or result in changing one entry into 3
3519 * entries, it is impossible to know if a split is required until the
3520 * data is examined.
3521 *
3522 * Splitting is a balancing act between keeping allocations to a minimum
3523 * and avoiding a 'jitter' event where a tree is expanded to make room
3524 * for an entry followed by a contraction when the entry is removed. To
3525 * accomplish the balance, there are empty slots remaining in both left
3526 * and right nodes after a split.
3527 */
3528 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3529 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3530 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3531 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3532 MA_TOPIARY(mat, mas->tree);
3533
3534 trace_ma_op(__func__, mas);
3535 mas->depth = mas_mt_height(mas);
3536 /* Allocation failures will happen early. */
3537 mas_node_count(mas, 1 + mas->depth * 2);
3538 if (mas_is_err(mas))
3539 return 0;
3540
3541 mast.l = &l_mas;
3542 mast.r = &r_mas;
3543 mast.orig_l = &prev_l_mas;
3544 mast.orig_r = &prev_r_mas;
3545 mast.free = &mat;
3546 mast.bn = b_node;
3547
3548 while (height++ <= mas->depth) {
3549 if (mt_slots[b_node->type] > b_node->b_end) {
3550 mas_split_final_node(&mast, mas, height);
3551 break;
3552 }
3553
3554 l_mas = r_mas = *mas;
3555 l_mas.node = mas_new_ma_node(mas, b_node);
3556 r_mas.node = mas_new_ma_node(mas, b_node);
3557 /*
3558 * Another way that 'jitter' is avoided is to terminate a split up early if the
3559 * left or right node has space to spare. This is referred to as "pushing left"
3560 * or "pushing right" and is similar to the B* tree, except the nodes left or
3561 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3562 * is a significant savings.
3563 */
3564 /* Try to push left. */
3565 if (mas_push_data(mas, height, &mast, true))
3566 break;
3567
3568 /* Try to push right. */
3569 if (mas_push_data(mas, height, &mast, false))
3570 break;
3571
3572 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3573 mast_split_data(&mast, mas, split);
3574 /*
3575 * Usually correct, mab_mas_cp in the above call overwrites
3576 * r->max.
3577 */
3578 mast.r->max = mas->max;
3579 mast_fill_bnode(&mast, mas, 1);
3580 prev_l_mas = *mast.l;
3581 prev_r_mas = *mast.r;
3582 }
3583
3584 /* Set the original node as dead */
3585 mat_add(mast.free, mas->node);
3586 mas->node = l_mas.node;
3587 mas_wmb_replace(mas, mast.free, NULL);
3588 mtree_range_walk(mas);
3589 return 1;
3590 }
3591
3592 /*
3593 * mas_reuse_node() - Reuse the node to store the data.
3594 * @wr_mas: The maple write state
3595 * @bn: The maple big node
3596 * @end: The end of the data.
3597 *
3598 * Will always return false in RCU mode.
3599 *
3600 * Return: True if node was reused, false otherwise.
3601 */
3602 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3603 struct maple_big_node *bn, unsigned char end)
3604 {
3605 /* Need to be rcu safe. */
3606 if (mt_in_rcu(wr_mas->mas->tree))
3607 return false;
3608
	if (end > bn->b_end) {
		int clear = mt_slots[wr_mas->type] - bn->b_end;

		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
		memset(wr_mas->pivots + bn->b_end, 0,
		       sizeof(unsigned long) * clear);
	}
3615 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3616 return true;
3617 }
3618
3619 /*
3620 * mas_commit_b_node() - Commit the big node into the tree.
3621 * @wr_mas: The maple write state
3622 * @b_node: The maple big node
3623 * @end: The end of the data.
3624 */
3625 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3626 struct maple_big_node *b_node, unsigned char end)
3627 {
3628 struct maple_node *node;
3629 unsigned char b_end = b_node->b_end;
3630 enum maple_type b_type = b_node->type;
3631
3632 if ((b_end < mt_min_slots[b_type]) &&
3633 (!mte_is_root(wr_mas->mas->node)) &&
3634 (mas_mt_height(wr_mas->mas) > 1))
3635 return mas_rebalance(wr_mas->mas, b_node);
3636
3637 if (b_end >= mt_slots[b_type])
3638 return mas_split(wr_mas->mas, b_node);
3639
3640 if (mas_reuse_node(wr_mas, b_node, end))
3641 goto reuse_node;
3642
3643 mas_node_count(wr_mas->mas, 1);
3644 if (mas_is_err(wr_mas->mas))
3645 return 0;
3646
3647 node = mas_pop_node(wr_mas->mas);
3648 node->parent = mas_mn(wr_mas->mas)->parent;
3649 wr_mas->mas->node = mt_mk_node(node, b_type);
3650 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3651 mas_replace(wr_mas->mas, false);
3652 reuse_node:
3653 mas_update_gap(wr_mas->mas);
3654 return 1;
3655 }
3656
3657 /*
3658 * mas_root_expand() - Expand a root to a node
3659 * @mas: The maple state
3660 * @entry: The entry to store into the tree
3661 */
3662 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3663 {
3664 void *contents = mas_root_locked(mas);
3665 enum maple_type type = maple_leaf_64;
3666 struct maple_node *node;
3667 void __rcu **slots;
3668 unsigned long *pivots;
3669 int slot = 0;
3670
3671 mas_node_count(mas, 1);
3672 if (unlikely(mas_is_err(mas)))
3673 return 0;
3674
3675 node = mas_pop_node(mas);
3676 pivots = ma_pivots(node, type);
3677 slots = ma_slots(node, type);
3678 node->parent = ma_parent_ptr(
3679 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3680 mas->node = mt_mk_node(node, type);
3681
3682 if (mas->index) {
3683 if (contents) {
3684 rcu_assign_pointer(slots[slot], contents);
3685 if (likely(mas->index > 1))
3686 slot++;
3687 }
3688 pivots[slot++] = mas->index - 1;
3689 }
3690
3691 rcu_assign_pointer(slots[slot], entry);
3692 mas->offset = slot;
3693 pivots[slot] = mas->last;
3694 if (mas->last != ULONG_MAX)
3695 slot++;
3696 mas->depth = 1;
3697 mas_set_height(mas);
3698 ma_set_meta(node, maple_leaf_64, 0, slot);
3699 /* swap the new root into the tree */
3700 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3701 return slot;
3702 }
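
/*
 * Worked example of mas_root_expand() (illustrative sketch): storing P over
 * [5, 10] in an empty tree allocates one maple_leaf_64 node laid out as
 *
 *	slot 0: NULL,  pivot 4   (covers the leading gap [0, 4])
 *	slot 1: P,     pivot 10  (the new entry)
 *
 * with the node metadata end set to 2 and everything past pivot 10 implicitly
 * NULL up to ULONG_MAX.  If the tree previously held a single entry at index
 * 0, that entry is preserved in slot 0 instead.
 */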
3703
3704 static inline void mas_store_root(struct ma_state *mas, void *entry)
3705 {
3706 if (likely((mas->last != 0) || (mas->index != 0)))
3707 mas_root_expand(mas, entry);
3708 else if (((unsigned long) (entry) & 3) == 2)
3709 mas_root_expand(mas, entry);
3710 else {
3711 rcu_assign_pointer(mas->tree->ma_root, entry);
3712 mas->node = MAS_START;
3713 }
3714 }
3715
/*
 * mas_is_span_wr() - Check if the write needs to be treated as a write that
 * spans the node.
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another OR if
 * the write of a %NULL will cause the node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
3729 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3730 {
3731 unsigned long max;
3732 unsigned long last = wr_mas->mas->last;
3733 unsigned long piv = wr_mas->r_max;
3734 enum maple_type type = wr_mas->type;
3735 void *entry = wr_mas->entry;
3736
3737 /* Contained in this pivot */
3738 if (piv > last)
3739 return false;
3740
3741 max = wr_mas->mas->max;
3742 if (unlikely(ma_is_leaf(type))) {
3743 /* Fits in the node, but may span slots. */
3744 if (last < max)
3745 return false;
3746
3747 /* Writes to the end of the node but not null. */
3748 if ((last == max) && entry)
3749 return false;
3750
3751 /*
3752 * Writing ULONG_MAX is not a spanning write regardless of the
3753 * value being written as long as the range fits in the node.
3754 */
3755 if ((last == ULONG_MAX) && (last == max))
3756 return false;
3757 } else if (piv == last) {
3758 if (entry)
3759 return false;
3760
3761 /* Detect spanning store wr walk */
3762 if (last == ULONG_MAX)
3763 return false;
3764 }
3765
3766 trace_ma_write(__func__, wr_mas->mas, piv, entry);
3767
3768 return true;
3769 }
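
/*
 * Examples for mas_is_span_wr() (illustrative sketch), with a leaf covering
 * up to mas->max == 199:
 *
 *	writing [150, 180], any value:	not spanning (fits in the node)
 *	writing [150, 199], non-NULL:	not spanning (ends at the node max)
 *	writing [150, 199], NULL:	spanning, since the node would end in
 *					NULL and must merge with the neighbour
 *	writing [150, ULONG_MAX] when mas->max == ULONG_MAX: never spanning,
 *	as there is nothing to the right
 */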
3770
3771 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3772 {
3773 wr_mas->type = mte_node_type(wr_mas->mas->node);
3774 mas_wr_node_walk(wr_mas);
3775 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3776 }
3777
3778 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3779 {
3780 wr_mas->mas->max = wr_mas->r_max;
3781 wr_mas->mas->min = wr_mas->r_min;
3782 wr_mas->mas->node = wr_mas->content;
3783 wr_mas->mas->offset = 0;
3784 wr_mas->mas->depth++;
}

3786 /*
3787 * mas_wr_walk() - Walk the tree for a write.
3788 * @wr_mas: The maple write state
3789 *
3790 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3791 *
3792 * Return: True if it's contained in a node, false on spanning write.
3793 */
3794 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3795 {
3796 struct ma_state *mas = wr_mas->mas;
3797
3798 while (true) {
3799 mas_wr_walk_descend(wr_mas);
3800 if (unlikely(mas_is_span_wr(wr_mas)))
3801 return false;
3802
3803 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3804 mas->offset);
3805 if (ma_is_leaf(wr_mas->type))
3806 return true;
3807
3808 mas_wr_walk_traverse(wr_mas);
3809 }
3810
3811 return true;
3812 }
3813
3814 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3815 {
3816 struct ma_state *mas = wr_mas->mas;
3817
3818 while (true) {
3819 mas_wr_walk_descend(wr_mas);
3820 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3821 mas->offset);
3822 if (ma_is_leaf(wr_mas->type))
3823 return true;
3824 mas_wr_walk_traverse(wr_mas);
3826 }
3827 return true;
3828 }
3829 /*
3830 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3831 * @l_wr_mas: The left maple write state
3832 * @r_wr_mas: The right maple write state
3833 */
3834 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3835 struct ma_wr_state *r_wr_mas)
3836 {
3837 struct ma_state *r_mas = r_wr_mas->mas;
3838 struct ma_state *l_mas = l_wr_mas->mas;
3839 unsigned char l_slot;
3840
3841 l_slot = l_mas->offset;
3842 if (!l_wr_mas->content)
3843 l_mas->index = l_wr_mas->r_min;
3844
3845 if ((l_mas->index == l_wr_mas->r_min) &&
3846 (l_slot &&
3847 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3848 if (l_slot > 1)
3849 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3850 else
3851 l_mas->index = l_mas->min;
3852
3853 l_mas->offset = l_slot - 1;
3854 }
3855
3856 if (!r_wr_mas->content) {
3857 if (r_mas->last < r_wr_mas->r_max)
3858 r_mas->last = r_wr_mas->r_max;
3859 r_mas->offset++;
3860 } else if ((r_mas->last == r_wr_mas->r_max) &&
3861 (r_mas->last < r_mas->max) &&
3862 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3863 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3864 r_wr_mas->type, r_mas->offset + 1);
3865 r_mas->offset++;
3866 }
3867 }
3868
3869 static inline void *mas_state_walk(struct ma_state *mas)
3870 {
3871 void *entry;
3872
3873 entry = mas_start(mas);
3874 if (mas_is_none(mas))
3875 return NULL;
3876
3877 if (mas_is_ptr(mas))
3878 return entry;
3879
3880 return mtree_range_walk(mas);
3881 }
3882
3883 /*
3884 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3885 * to date.
3886 *
3887 * @mas: The maple state.
3888 *
3889 * Note: Leaves mas in an undesirable state.
3890 * Return: The entry for @mas->index or %NULL on dead node.
3891 */
3892 static inline void *mtree_lookup_walk(struct ma_state *mas)
3893 {
3894 unsigned long *pivots;
3895 unsigned char offset;
3896 struct maple_node *node;
3897 struct maple_enode *next;
3898 enum maple_type type;
3899 void __rcu **slots;
3900 unsigned char end;
3901 unsigned long max;
3902
3903 next = mas->node;
3904 max = ULONG_MAX;
3905 do {
3906 offset = 0;
3907 node = mte_to_node(next);
3908 type = mte_node_type(next);
3909 pivots = ma_pivots(node, type);
3910 end = ma_data_end(node, type, pivots, max);
3911 if (unlikely(ma_dead_node(node)))
3912 goto dead_node;
3913 do {
3914 if (pivots[offset] >= mas->index) {
3915 max = pivots[offset];
3916 break;
3917 }
3918 } while (++offset < end);
3919
3920 slots = ma_slots(node, type);
3921 next = mt_slot(mas->tree, slots, offset);
3922 if (unlikely(ma_dead_node(node)))
3923 goto dead_node;
3924 } while (!ma_is_leaf(type));
3925
3926 return (void *)next;
3927
3928 dead_node:
3929 mas_reset(mas);
3930 return NULL;
3931 }
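
/*
 * A minimal usage sketch: the public mtree_load() is built on the lookup
 * walk above and takes the RCU read lock internally. The helper name is
 * arbitrary.
 */
static bool __maybe_unused mt_example_contains(struct maple_tree *mt,
					       unsigned long index)
{
	return mtree_load(mt, index) != NULL;
}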
3932
3933 /*
3934 * mas_new_root() - Create a new root node that only contains the entry passed
3935 * in.
3936 * @mas: The maple state
3937 * @entry: The entry to store.
3938 *
3939 * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
3940 *
3941 * Return: 0 on error, 1 on success.
3942 */
3943 static inline int mas_new_root(struct ma_state *mas, void *entry)
3944 {
3945 struct maple_enode *root = mas_root_locked(mas);
3946 enum maple_type type = maple_leaf_64;
3947 struct maple_node *node;
3948 void __rcu **slots;
3949 unsigned long *pivots;
3950
3951 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3952 mas->depth = 0;
3953 mas_set_height(mas);
3954 rcu_assign_pointer(mas->tree->ma_root, entry);
3955 mas->node = MAS_START;
3956 goto done;
3957 }
3958
3959 mas_node_count(mas, 1);
3960 if (mas_is_err(mas))
3961 return 0;
3962
3963 node = mas_pop_node(mas);
3964 pivots = ma_pivots(node, type);
3965 slots = ma_slots(node, type);
3966 node->parent = ma_parent_ptr(
3967 ((unsigned long)mas->tree | MA_ROOT_PARENT));
3968 mas->node = mt_mk_node(node, type);
3969 rcu_assign_pointer(slots[0], entry);
3970 pivots[0] = mas->last;
3971 mas->depth = 1;
3972 mas_set_height(mas);
3973 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3974
3975 done:
3976 if (xa_is_node(root))
3977 mte_destroy_walk(root, mas->tree);
3978
3979 return 1;
3980 }
3981 /*
3982 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3983 * and new nodes where necessary, then place the sub-tree in the actual tree.
3984 * Note that mas is expected to point to the node which caused the store to
3985 * span.
3986 * @wr_mas: The maple write state
3987 *
3988 * Return: 0 on error, positive on success.
3989 */
3990 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3991 {
3992 struct maple_subtree_state mast;
3993 struct maple_big_node b_node;
3994 struct ma_state *mas;
3995 unsigned char height;
3996
3997 /* Left and Right side of spanning store */
3998 MA_STATE(l_mas, NULL, 0, 0);
3999 MA_STATE(r_mas, NULL, 0, 0);
4000
4001 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
4002 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
4003
4004 /*
4005 * A store operation that spans multiple nodes is called a spanning
4006 * store and is handled early in the store call stack by the function
4007 * mas_is_span_wr(). When a spanning store is identified, the maple
4008 * state is duplicated. The first maple state walks the left tree path
4009 * to ``index``, the duplicate walks the right tree path to ``last``.
4010 * The data in the two nodes are combined into a single node, two nodes,
4011 * or possibly three nodes (see the 3-way split above). A ``NULL``
4012 * written to the last entry of a node is considered a spanning store as
4013 * a rebalance is required for the operation to complete and an overflow
4014 * of data may happen.
4015 */
4016 mas = wr_mas->mas;
4017 trace_ma_op(__func__, mas);
4018
4019 if (unlikely(!mas->index && mas->last == ULONG_MAX))
4020 return mas_new_root(mas, wr_mas->entry);
4021 /*
4022 * Node rebalancing may occur due to this store, so there may be three new
4023 * entries per level plus a new root.
4024 */
4025 height = mas_mt_height(mas);
4026 mas_node_count(mas, 1 + height * 3);
4027 if (mas_is_err(mas))
4028 return 0;
4029
4030 /*
4031 * Set up right side. Need to get to the next offset after the spanning
4032 * store to ensure it's not NULL and to combine both the next node and
4033 * the node with the start together.
4034 */
4035 r_mas = *mas;
4036 /* Avoid overflow, walk to next slot in the tree. */
4037 if (r_mas.last + 1)
4038 r_mas.last++;
4039
4040 r_mas.index = r_mas.last;
4041 mas_wr_walk_index(&r_wr_mas);
4042 r_mas.last = r_mas.index = mas->last;
4043
4044 /* Set up left side. */
4045 l_mas = *mas;
4046 mas_wr_walk_index(&l_wr_mas);
4047
4048 if (!wr_mas->entry) {
4049 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4050 mas->offset = l_mas.offset;
4051 mas->index = l_mas.index;
4052 mas->last = l_mas.last = r_mas.last;
4053 }
4054
4055 /* expanding NULLs may make this cover the entire range */
4056 if (!l_mas.index && r_mas.last == ULONG_MAX) {
4057 mas_set_range(mas, 0, ULONG_MAX);
4058 return mas_new_root(mas, wr_mas->entry);
4059 }
4060
4061 memset(&b_node, 0, sizeof(struct maple_big_node));
4062 /* Copy l_mas and store the value in b_node. */
4063 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4064 /* Copy r_mas into b_node. */
4065 if (r_mas.offset <= r_wr_mas.node_end)
4066 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4067 &b_node, b_node.b_end + 1);
4068 else
4069 b_node.b_end++;
4070
4071 /* Stop spanning searches by searching for just index. */
4072 l_mas.index = l_mas.last = mas->index;
4073
4074 mast.bn = &b_node;
4075 mast.orig_l = &l_mas;
4076 mast.orig_r = &r_mas;
4077 /* Combine l_mas and r_mas and split them up evenly again. */
4078 return mas_spanning_rebalance(mas, &mast, height + 1);
4079 }
4080
4081 /*
4082 * mas_wr_node_store() - Attempt to store the value in a node
4083 * @wr_mas: The maple write state
4084 *
4085 * Attempts to reuse the node, but may allocate.
4086 *
4087 * Return: True if stored, false otherwise
4088 */
4089 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4090 {
4091 struct ma_state *mas = wr_mas->mas;
4092 void __rcu **dst_slots;
4093 unsigned long *dst_pivots;
4094 unsigned char dst_offset;
4095 unsigned char new_end = wr_mas->node_end;
4096 unsigned char offset;
4097 unsigned char node_slots = mt_slots[wr_mas->type];
4098 struct maple_node reuse, *newnode;
4099 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4100 bool in_rcu = mt_in_rcu(mas->tree);
4101
4102 offset = mas->offset;
4103 if (mas->last == wr_mas->r_max) {
4104 /* runs right to the end of the node */
4105 if (mas->last == mas->max)
4106 new_end = offset;
4107 /* don't copy this offset */
4108 wr_mas->offset_end++;
4109 } else if (mas->last < wr_mas->r_max) {
4110 /* new range ends in this range */
4111 if (unlikely(wr_mas->r_max == ULONG_MAX))
4112 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4113
4114 new_end++;
4115 } else {
4116 if (wr_mas->end_piv == mas->last)
4117 wr_mas->offset_end++;
4118
4119 new_end -= wr_mas->offset_end - offset - 1;
4120 }
4121
4122 /* new range starts within a range */
4123 if (wr_mas->r_min < mas->index)
4124 new_end++;
4125
4126 /* Not enough room */
4127 if (new_end >= node_slots)
4128 return false;
4129
4130 /* Not enough data. */
4131 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4132 !(mas->mas_flags & MA_STATE_BULK))
4133 return false;
4134
4135 /* set up node. */
4136 if (in_rcu) {
4137 mas_node_count(mas, 1);
4138 if (mas_is_err(mas))
4139 return false;
4140
4141 newnode = mas_pop_node(mas);
4142 } else {
4143 memset(&reuse, 0, sizeof(struct maple_node));
4144 newnode = &reuse;
4145 }
4146
4147 newnode->parent = mas_mn(mas)->parent;
4148 dst_pivots = ma_pivots(newnode, wr_mas->type);
4149 dst_slots = ma_slots(newnode, wr_mas->type);
4150 /* Copy from start to insert point */
4151 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4152 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4153 dst_offset = offset;
4154
4155 /* Handle insert of new range starting after old range */
4156 if (wr_mas->r_min < mas->index) {
4157 mas->offset++;
4158 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4159 dst_pivots[dst_offset++] = mas->index - 1;
4160 }
4161
4162 /* Store the new entry and range end. */
4163 if (dst_offset < max_piv)
4164 dst_pivots[dst_offset] = mas->last;
4165 mas->offset = dst_offset;
4166 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4167
4168 /*
4169 * this range wrote to the end of the node or it overwrote the rest of
4170 * the data
4171 */
4172 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4173 new_end = dst_offset;
4174 goto done;
4175 }
4176
4177 dst_offset++;
4178 /* Copy to the end of node if necessary. */
4179 copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4180 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4181 sizeof(void *) * copy_size);
4182 if (dst_offset < max_piv) {
4183 if (copy_size > max_piv - dst_offset)
4184 copy_size = max_piv - dst_offset;
4185
4186 memcpy(dst_pivots + dst_offset,
4187 wr_mas->pivots + wr_mas->offset_end,
4188 sizeof(unsigned long) * copy_size);
4189 }
4190
4191 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4192 dst_pivots[new_end] = mas->max;
4193
4194 done:
4195 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4196 if (in_rcu) {
4197 mte_set_node_dead(mas->node);
4198 mas->node = mt_mk_node(newnode, wr_mas->type);
4199 mas_replace(mas, false);
4200 } else {
4201 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4202 }
4203 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4204 mas_update_gap(mas);
4205 return true;
4206 }
4207
4208 /*
4209 * mas_wr_slot_store() - Attempt to store a value in a slot.
4210 * @wr_mas: The maple write state
4211 *
4212 * Return: True if stored, false otherwise
4213 */
4214 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4215 {
4216 struct ma_state *mas = wr_mas->mas;
4217 unsigned long lmax; /* Logical max. */
4218 unsigned char offset = mas->offset;
4219
4220 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4221 (offset != wr_mas->node_end)))
4222 return false;
4223
4224 if (offset == wr_mas->node_end - 1)
4225 lmax = mas->max;
4226 else
4227 lmax = wr_mas->pivots[offset + 1];
4228
4229 /* going to overwrite too many slots. */
4230 if (lmax < mas->last)
4231 return false;
4232
4233 if (wr_mas->r_min == mas->index) {
4234 /* overwriting two or more ranges with one. */
4235 if (lmax == mas->last)
4236 return false;
4237
4238 /* Overwriting all of offset and a portion of offset + 1. */
4239 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4240 wr_mas->pivots[offset] = mas->last;
4241 goto done;
4242 }
4243
4244 /* Doesn't end on the next range end. */
4245 if (lmax != mas->last)
4246 return false;
4247
4248 /* Overwriting a portion of offset and all of offset + 1 */
4249 if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4250 (wr_mas->entry || wr_mas->pivots[offset + 1]))
4251 wr_mas->pivots[offset + 1] = mas->last;
4252
4253 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4254 wr_mas->pivots[offset] = mas->index - 1;
4255 mas->offset++; /* Keep mas accurate. */
4256
4257 done:
4258 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4259 mas_update_gap(mas);
4260 return true;
4261 }
4262
4263 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4264 {
4265 while ((wr_mas->offset_end < wr_mas->node_end) &&
4266 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4267 wr_mas->offset_end++;
4268
4269 if (wr_mas->offset_end < wr_mas->node_end)
4270 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4271 else
4272 wr_mas->end_piv = wr_mas->mas->max;
4273 }
4274
4275 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4276 {
4277 struct ma_state *mas = wr_mas->mas;
4278
4279 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4280 mas->last = wr_mas->end_piv;
4281
4282 /* Check next slot(s) if we are overwriting the end */
4283 if ((mas->last == wr_mas->end_piv) &&
4284 (wr_mas->node_end != wr_mas->offset_end) &&
4285 !wr_mas->slots[wr_mas->offset_end + 1]) {
4286 wr_mas->offset_end++;
4287 if (wr_mas->offset_end == wr_mas->node_end)
4288 mas->last = mas->max;
4289 else
4290 mas->last = wr_mas->pivots[wr_mas->offset_end];
4291 wr_mas->end_piv = mas->last;
4292 }
4293
4294 if (!wr_mas->content) {
4295 /* If this one is null, the next and prev are not */
4296 mas->index = wr_mas->r_min;
4297 } else {
4298 /* Check prev slot if we are overwriting the start */
4299 if (mas->index == wr_mas->r_min && mas->offset &&
4300 !wr_mas->slots[mas->offset - 1]) {
4301 mas->offset--;
4302 wr_mas->r_min = mas->index =
4303 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4304 wr_mas->r_max = wr_mas->pivots[mas->offset];
4305 }
4306 }
4307 }
4308
4309 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4310 {
4311 unsigned char end = wr_mas->node_end;
4312 unsigned char new_end = end + 1;
4313 struct ma_state *mas = wr_mas->mas;
4314 unsigned char node_pivots = mt_pivots[wr_mas->type];
4315
4316 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4317 if (new_end < node_pivots)
4318 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4319
4320 if (new_end < node_pivots)
4321 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4322
4323 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4324 mas->offset = new_end;
4325 wr_mas->pivots[end] = mas->index - 1;
4326
4327 return true;
4328 }
4329
4330 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4331 if (new_end < node_pivots)
4332 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4333
4334 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4335 if (new_end < node_pivots)
4336 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4337
4338 wr_mas->pivots[end] = mas->last;
4339 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4340 return true;
4341 }
4342
4343 return false;
4344 }
4345
4346 /*
4347 * mas_wr_bnode() - Slow path for a modification.
4348 * @wr_mas: The write maple state
4349 *
4350 * This is where split and rebalance operations end up.
4351 */
4352 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4353 {
4354 struct maple_big_node b_node;
4355
4356 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4357 memset(&b_node, 0, sizeof(struct maple_big_node));
4358 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4359 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4360 }
4361
4362 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4363 {
4364 unsigned char node_slots;
4365 unsigned char node_size;
4366 struct ma_state *mas = wr_mas->mas;
4367
4368 /* Direct replacement */
4369 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4370 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4371 if (!!wr_mas->entry ^ !!wr_mas->content)
4372 mas_update_gap(mas);
4373 return;
4374 }
4375
4376 /* Attempt to append */
4377 node_slots = mt_slots[wr_mas->type];
4378 node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4379 if (mas->max == ULONG_MAX)
4380 node_size++;
4381
4382 /* slot and node store will not fit, go to the slow path */
4383 if (unlikely(node_size >= node_slots))
4384 goto slow_path;
4385
4386 if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4387 (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4388 if (!wr_mas->content || !wr_mas->entry)
4389 mas_update_gap(mas);
4390 return;
4391 }
4392
4393 if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4394 return;
4395 else if (mas_wr_node_store(wr_mas))
4396 return;
4397
4398 if (mas_is_err(mas))
4399 return;
4400
4401 slow_path:
4402 mas_wr_bnode(wr_mas);
4403 }
4404
4405 /*
4406 * mas_wr_store_entry() - Internal call to store a value
4407 * @wr_mas: The maple write state
4408 *
4409 * Return: The contents that were previously stored at the index.
4411 */
4412 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4413 {
4414 struct ma_state *mas = wr_mas->mas;
4415
4416 wr_mas->content = mas_start(mas);
4417 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4418 mas_store_root(mas, wr_mas->entry);
4419 return wr_mas->content;
4420 }
4421
4422 if (unlikely(!mas_wr_walk(wr_mas))) {
4423 mas_wr_spanning_store(wr_mas);
4424 return wr_mas->content;
4425 }
4426
4427 /* At this point, we are at the leaf node that needs to be altered. */
4428 mas_wr_end_piv(wr_mas);
4429
4430 if (!wr_mas->entry)
4431 mas_wr_extend_null(wr_mas);
4432
4433 /* New root for a single pointer */
4434 if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4435 mas_new_root(mas, wr_mas->entry);
4436 return wr_mas->content;
4437 }
4438
4439 mas_wr_modify(wr_mas);
4440 return wr_mas->content;
4441 }
4442
4443 /**
4444 * mas_insert() - Internal call to insert a value
4445 * @mas: The maple state
4446 * @entry: The entry to store
4447 *
4448 * Return: %NULL on success, otherwise the contents that already exist at the
4449 * requested index. The maple state needs to be checked for error conditions.
4450 */
4451 static inline void *mas_insert(struct ma_state *mas, void *entry)
4452 {
4453 MA_WR_STATE(wr_mas, mas, entry);
4454
4455 /*
4456 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4457 * tree. If the insert fits exactly into an existing gap with a value
4458 * of NULL, then the slot only needs to be written with the new value.
4459 * If the range being inserted is adjacent to another range, then only a
4460 * single pivot needs to be inserted (as well as writing the entry). If
4461 * the new range is within a gap but does not touch any other ranges,
4462 * then two pivots need to be inserted: the start - 1, and the end. As
4463 * usual, the entry must be written. Most operations require a new node
4464 * to be allocated and replace an existing node to ensure RCU safety,
4465 * when in RCU mode. The exception to requiring a newly allocated node
4466 * is when inserting at the end of a node (appending). When done
4467 * carefully, appending can reuse the node in place.
4468 */
4469 wr_mas.content = mas_start(mas);
4470 if (wr_mas.content)
4471 goto exists;
4472
4473 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4474 mas_store_root(mas, entry);
4475 return NULL;
4476 }
4477
4478 /* spanning writes always overwrite something */
4479 if (!mas_wr_walk(&wr_mas))
4480 goto exists;
4481
4482 /* At this point, we are at the leaf node that needs to be altered. */
4483 wr_mas.offset_end = mas->offset;
4484 wr_mas.end_piv = wr_mas.r_max;
4485
4486 if (wr_mas.content || (mas->last > wr_mas.r_max))
4487 goto exists;
4488
4489 if (!entry)
4490 return NULL;
4491
4492 mas_wr_modify(&wr_mas);
4493 return wr_mas.content;
4494
4495 exists:
4496 mas_set_err(mas, -EEXIST);
4497 return wr_mas.content;
4498
4499 }
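
/*
 * Illustrative sketch of the insert semantics described above: unlike a
 * store, an insert never overwrites. The second call below fails with
 * -EEXIST because [15, 30] overlaps the occupied range [10, 20]. The
 * helper name and values are arbitrary examples.
 */
static int __maybe_unused mt_example_insert_twice(struct maple_tree *mt)
{
	int ret;

	ret = mtree_insert_range(mt, 10, 20, xa_mk_value(1), GFP_KERNEL);
	if (ret)
		return ret;

	/* Overlaps [10, 20], so this is expected to return -EEXIST. */
	return mtree_insert_range(mt, 15, 30, xa_mk_value(2), GFP_KERNEL);
}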
4500
4501 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4502 {
4503 retry:
4504 mas_set(mas, index);
4505 mas_state_walk(mas);
4506 if (mas_is_start(mas))
4507 goto retry;
4508 }
4509
4510 static inline bool mas_rewalk_if_dead(struct ma_state *mas,
4511 struct maple_node *node, const unsigned long index)
4512 {
4513 if (unlikely(ma_dead_node(node))) {
4514 mas_rewalk(mas, index);
4515 return true;
4516 }
4517 return false;
4518 }
4519
4520 /*
4521 * mas_prev_node() - Find the prev non-null entry at the same level in the
4522 * tree.
4523 * @mas: The maple state
4524 * @min: The lower limit to search
4525 *
4526 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4527 * Return: 1 if the node is dead, 0 otherwise.
4528 */
4529 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4530 {
4531 enum maple_type mt;
4532 int offset, level;
4533 void __rcu **slots;
4534 struct maple_node *node;
4535 struct maple_enode *enode;
4536 unsigned long *pivots;
4537
4538 if (mas_is_none(mas))
4539 return 0;
4540
4541 level = 0;
4542 do {
4543 node = mas_mn(mas);
4544 if (ma_is_root(node))
4545 goto no_entry;
4546
4547 /* Walk up. */
4548 if (unlikely(mas_ascend(mas)))
4549 return 1;
4550 offset = mas->offset;
4551 level++;
4552 } while (!offset);
4553
4554 offset--;
4555 mt = mte_node_type(mas->node);
4556 node = mas_mn(mas);
4557 slots = ma_slots(node, mt);
4558 pivots = ma_pivots(node, mt);
4559 if (unlikely(ma_dead_node(node)))
4560 return 1;
4561
4562 mas->max = pivots[offset];
4563 if (offset)
4564 mas->min = pivots[offset - 1] + 1;
4565 if (unlikely(ma_dead_node(node)))
4566 return 1;
4567
4568 if (mas->max < min)
4569 goto no_entry_min;
4570
4571 while (level > 1) {
4572 level--;
4573 enode = mas_slot(mas, slots, offset);
4574 if (unlikely(ma_dead_node(node)))
4575 return 1;
4576
4577 mas->node = enode;
4578 mt = mte_node_type(mas->node);
4579 node = mas_mn(mas);
4580 slots = ma_slots(node, mt);
4581 pivots = ma_pivots(node, mt);
4582 offset = ma_data_end(node, mt, pivots, mas->max);
4583 if (unlikely(ma_dead_node(node)))
4584 return 1;
4585
4586 if (offset)
4587 mas->min = pivots[offset - 1] + 1;
4588
4589 if (offset < mt_pivots[mt])
4590 mas->max = pivots[offset];
4591
4592 if (mas->max < min)
4593 goto no_entry;
4594 }
4595
4596 mas->node = mas_slot(mas, slots, offset);
4597 if (unlikely(ma_dead_node(node)))
4598 return 1;
4599
4600 mas->offset = mas_data_end(mas);
4601 if (unlikely(mte_dead_node(mas->node)))
4602 return 1;
4603
4604 return 0;
4605
4606 no_entry_min:
4607 mas->offset = offset;
4608 if (offset)
4609 mas->min = pivots[offset - 1] + 1;
4610 no_entry:
4611 if (unlikely(ma_dead_node(node)))
4612 return 1;
4613
4614 mas->node = MAS_NONE;
4615 return 0;
4616 }
4617
4618 /*
4619 * mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
* @node: The maple node of @mas
* @max: The maximum pivot value to check.
4622 *
4623 * The next value will be mas->node[mas->offset] or MAS_NONE.
4624 * Return: 1 on dead node, 0 otherwise.
4625 */
4626 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4627 unsigned long max)
4628 {
4629 unsigned long min;
4630 unsigned long *pivots;
4631 struct maple_enode *enode;
4632 int level = 0;
4633 unsigned char node_end;
4634 enum maple_type mt;
4635 void __rcu **slots;
4636
4637 if (mas->max >= max)
4638 goto no_entry;
4639
4640 min = mas->max + 1;
4641 level = 0;
4642 do {
4643 if (ma_is_root(node))
4644 goto no_entry;
4645
4646 /* Walk up. */
4647 if (unlikely(mas_ascend(mas)))
4648 return 1;
4649
4650 level++;
4651 node = mas_mn(mas);
4652 mt = mte_node_type(mas->node);
4653 pivots = ma_pivots(node, mt);
4654 node_end = ma_data_end(node, mt, pivots, mas->max);
4655 if (unlikely(ma_dead_node(node)))
4656 return 1;
4657
4658 } while (unlikely(mas->offset == node_end));
4659
4660 slots = ma_slots(node, mt);
4661 mas->offset++;
4662 enode = mas_slot(mas, slots, mas->offset);
4663 if (unlikely(ma_dead_node(node)))
4664 return 1;
4665
4666 if (level > 1)
4667 mas->offset = 0;
4668
4669 while (unlikely(level > 1)) {
4670 level--;
4671 mas->node = enode;
4672 node = mas_mn(mas);
4673 mt = mte_node_type(mas->node);
4674 slots = ma_slots(node, mt);
4675 enode = mas_slot(mas, slots, 0);
4676 if (unlikely(ma_dead_node(node)))
4677 return 1;
4678 }
4679
4680 if (!mas->offset)
4681 pivots = ma_pivots(node, mt);
4682
4683 mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4684 if (unlikely(ma_dead_node(node)))
4685 return 1;
4686
4687 mas->node = enode;
4688 mas->min = min;
4689 return 0;
4690
4691 no_entry:
4692 if (unlikely(ma_dead_node(node)))
4693 return 1;
4694
4695 mas->node = MAS_NONE;
4696 return 0;
4697 }
4698
4699 /*
4700 * mas_next_slot() - Get the entry in the next slot
4701 * @mas: The maple state
4702 * @max: The maximum starting range
4703 * @empty: Whether an empty (%NULL) entry may be returned
4705 *
4706 * Return: The entry in the next slot which is possibly NULL
4707 */
4708 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
4709 {
4710 void __rcu **slots;
4711 unsigned long *pivots;
4712 unsigned long pivot;
4713 enum maple_type type;
4714 struct maple_node *node;
4715 unsigned char data_end;
4716 unsigned long save_point = mas->last;
4717 void *entry;
4718
4719 retry:
4720 node = mas_mn(mas);
4721 type = mte_node_type(mas->node);
4722 pivots = ma_pivots(node, type);
4723 data_end = ma_data_end(node, type, pivots, mas->max);
4724 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4725 goto retry;
4726
4727 again:
4728 if (mas->max >= max) {
4729 if (likely(mas->offset < data_end))
4730 pivot = pivots[mas->offset];
4731 else
4732 return NULL; /* must be mas->max */
4733
4734 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4735 goto retry;
4736
4737 if (pivot >= max)
4738 return NULL;
4739 }
4740
4741 if (likely(mas->offset < data_end)) {
4742 mas->index = pivots[mas->offset] + 1;
4743 mas->offset++;
4744 if (likely(mas->offset < data_end))
4745 mas->last = pivots[mas->offset];
4746 else
4747 mas->last = mas->max;
4748 } else {
4749 if (mas_next_node(mas, node, max)) {
4750 mas_rewalk(mas, save_point);
4751 goto retry;
4752 }
4753
4754 if (mas_is_none(mas))
4755 return NULL;
4756
4757 mas->offset = 0;
4758 mas->index = mas->min;
4759 node = mas_mn(mas);
4760 type = mte_node_type(mas->node);
4761 pivots = ma_pivots(node, type);
4762 mas->last = pivots[0];
4763 }
4764
4765 slots = ma_slots(node, type);
4766 entry = mt_slot(mas->tree, slots, mas->offset);
4767 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4768 goto retry;
4769
4770 if (entry)
4771 return entry;
4772
4773 if (!empty) {
4774 if (!mas->offset)
4775 data_end = 2;
4776 goto again;
4777 }
4778
4779 return entry;
4780 }
4781
4782 /*
4783 * mas_next_entry() - Internal function to get the next entry.
4784 * @mas: The maple state
4785 * @limit: The maximum range start.
4786 *
4787 * Set the @mas->node to the next entry and the range_start to
4788 * the beginning value for the entry. Does not check beyond @limit.
4789 * Sets @mas->index and @mas->last to the limit if it is hit.
4790 * Restarts on dead nodes.
4791 *
4792 * Return: the next entry or %NULL.
4793 */
4794 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4795 {
4796 if (mas->last >= limit)
4797 return NULL;
4798
4799 return mas_next_slot(mas, limit, false);
4800 }
4801
4802 /*
4803 * mas_prev_nentry() - Get the previous node entry.
4804 * @mas: The maple state.
* @limit: The lower limit to check for a value.
* @index: The index to rewalk from if a dead node is encountered.
4806 *
4807 * Return: the entry, %NULL otherwise.
4808 */
4809 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4810 unsigned long index)
4811 {
4812 unsigned long pivot, min;
4813 unsigned char offset, count;
4814 struct maple_node *mn;
4815 enum maple_type mt;
4816 unsigned long *pivots;
4817 void __rcu **slots;
4818 void *entry;
4819
4820 retry:
4821 if (!mas->offset)
4822 return NULL;
4823
4824 mn = mas_mn(mas);
4825 mt = mte_node_type(mas->node);
4826 offset = mas->offset - 1;
4827 slots = ma_slots(mn, mt);
4828 pivots = ma_pivots(mn, mt);
4829 count = ma_data_end(mn, mt, pivots, mas->max);
4830 if (unlikely(mas_rewalk_if_dead(mas, mn, index)))
4831 goto retry;
4832
4833 offset = mas->offset - 1;
4834 if (offset >= mt_slots[mt])
4835 offset = mt_slots[mt] - 1;
4836
4837 if (offset >= count) {
4838 pivot = mas->max;
4839 offset = count;
4840 } else {
4841 pivot = pivots[offset];
4842 }
4843
4844 if (unlikely(mas_rewalk_if_dead(mas, mn, index)))
4845 goto retry;
4846
4847 while (offset && !mas_slot(mas, slots, offset)) {
4848 pivot = pivots[--offset];
4849 if (pivot >= limit)
4850 break;
4851 }
4852
4853 /*
4854 * If the slot was null but we've shifted outside the limits, then set
4855 * the range to the last NULL.
4856 */
4857 if (unlikely((pivot < limit) && (offset < mas->offset)))
4858 pivot = pivots[++offset];
4859
4860 min = mas_safe_min(mas, pivots, offset);
4861 entry = mas_slot(mas, slots, offset);
4862 if (unlikely(mas_rewalk_if_dead(mas, mn, index)))
4863 goto retry;
4864
4865 mas->offset = offset;
4866 mas->last = pivot;
4867 mas->index = min;
4868 return entry;
4869 }
4870
4871 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4872 {
4873 void *entry;
4874 struct maple_enode *prev_enode;
4875 unsigned char prev_offset;
4876
4877 if (mas->index < min)
4878 return NULL;
4879
4880 retry:
4881 prev_enode = mas->node;
4882 prev_offset = mas->offset;
4883 while (likely(!mas_is_none(mas))) {
4884 entry = mas_prev_nentry(mas, min, mas->index);
4885
4886 if (likely(entry))
4887 return entry;
4888
4889 if (unlikely(mas->index <= min))
4890 return NULL;
4891
4892 if (unlikely(mas_prev_node(mas, min))) {
4893 mas_rewalk(mas, mas->index);
4894 goto retry;
4895 }
4896
4897 mas->offset++;
4898 }
4899
4900 mas->node = prev_enode;
4901 mas->offset = prev_offset;
4902 return NULL;
4903 }
4904
4905 /*
4906 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4907 * highest gap address of a given size in a given node and descend.
4908 * @mas: The maple state
4909 * @size: The needed size.
4910 * @gap_min: Set to the minimum of the found gap.
4911 * @gap_max: Set to the maximum of the found gap.
4912 * Return: True if found in a leaf, false otherwise.
4913 */
4914 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4915 unsigned long *gap_min, unsigned long *gap_max)
4916 {
4917 enum maple_type type = mte_node_type(mas->node);
4918 struct maple_node *node = mas_mn(mas);
4919 unsigned long *pivots, *gaps;
4920 void __rcu **slots;
4921 unsigned long gap = 0;
4922 unsigned long max, min;
4923 unsigned char offset;
4924
4925 if (unlikely(mas_is_err(mas)))
4926 return true;
4927
4928 if (ma_is_dense(type)) {
4929 /* dense nodes. */
4930 mas->offset = (unsigned char)(mas->index - mas->min);
4931 return true;
4932 }
4933
4934 pivots = ma_pivots(node, type);
4935 slots = ma_slots(node, type);
4936 gaps = ma_gaps(node, type);
4937 offset = mas->offset;
4938 min = mas_safe_min(mas, pivots, offset);
4939 /* Skip out of bounds. */
4940 while (mas->last < min)
4941 min = mas_safe_min(mas, pivots, --offset);
4942
4943 max = mas_safe_pivot(mas, pivots, offset, type);
4944 while (mas->index <= max) {
4945 gap = 0;
4946 if (gaps)
4947 gap = gaps[offset];
4948 else if (!mas_slot(mas, slots, offset))
4949 gap = max - min + 1;
4950
4951 if (gap) {
4952 if ((size <= gap) && (size <= mas->last - min + 1))
4953 break;
4954
4955 if (!gaps) {
4956 /* Skip the next slot, it cannot be a gap. */
4957 if (offset < 2)
4958 goto ascend;
4959
4960 offset -= 2;
4961 max = pivots[offset];
4962 min = mas_safe_min(mas, pivots, offset);
4963 continue;
4964 }
4965 }
4966
4967 if (!offset)
4968 goto ascend;
4969
4970 offset--;
4971 max = min - 1;
4972 min = mas_safe_min(mas, pivots, offset);
4973 }
4974
4975 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4976 goto no_space;
4977
4978 if (unlikely(ma_is_leaf(type))) {
4979 mas->offset = offset;
4980 *gap_min = min;
4981 *gap_max = min + gap - 1;
4982 return true;
4983 }
4984
4985 /* descend, only happens under lock. */
4986 mas->node = mas_slot(mas, slots, offset);
4987 mas->min = min;
4988 mas->max = max;
4989 mas->offset = mas_data_end(mas);
4990 return false;
4991
4992 ascend:
4993 if (!mte_is_root(mas->node))
4994 return false;
4995
4996 no_space:
4997 mas_set_err(mas, -EBUSY);
4998 return false;
4999 }
5000
5001 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
5002 {
5003 enum maple_type type = mte_node_type(mas->node);
5004 unsigned long pivot, min, gap = 0;
5005 unsigned char offset, data_end;
5006 unsigned long *gaps, *pivots;
5007 void __rcu **slots;
5008 struct maple_node *node;
5009 bool found = false;
5010
5011 if (ma_is_dense(type)) {
5012 mas->offset = (unsigned char)(mas->index - mas->min);
5013 return true;
5014 }
5015
5016 node = mas_mn(mas);
5017 pivots = ma_pivots(node, type);
5018 slots = ma_slots(node, type);
5019 gaps = ma_gaps(node, type);
5020 offset = mas->offset;
5021 min = mas_safe_min(mas, pivots, offset);
5022 data_end = ma_data_end(node, type, pivots, mas->max);
5023 for (; offset <= data_end; offset++) {
5024 pivot = mas_logical_pivot(mas, pivots, offset, type);
5025
5026 /* Not within lower bounds */
5027 if (mas->index > pivot)
5028 goto next_slot;
5029
5030 if (gaps)
5031 gap = gaps[offset];
5032 else if (!mas_slot(mas, slots, offset))
5033 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5034 else
5035 goto next_slot;
5036
5037 if (gap >= size) {
5038 if (ma_is_leaf(type)) {
5039 found = true;
5040 goto done;
5041 }
5042 if (mas->index <= pivot) {
5043 mas->node = mas_slot(mas, slots, offset);
5044 mas->min = min;
5045 mas->max = pivot;
5046 offset = 0;
5047 break;
5048 }
5049 }
5050 next_slot:
5051 min = pivot + 1;
5052 if (mas->last <= pivot) {
5053 mas_set_err(mas, -EBUSY);
5054 return true;
5055 }
5056 }
5057
5058 if (mte_is_root(mas->node))
5059 found = true;
5060 done:
5061 mas->offset = offset;
5062 return found;
5063 }
5064
5065 /**
5066 * mas_walk() - Search for @mas->index in the tree.
5067 * @mas: The maple state.
5068 *
5069 * mas->index and mas->last will be set to the range if there is a value. If
5070 * mas->node is MAS_NONE, reset to MAS_START.
5071 *
5072 * Return: the entry at the location or %NULL.
5073 */
5074 void *mas_walk(struct ma_state *mas)
5075 {
5076 void *entry;
5077
5078 retry:
5079 entry = mas_state_walk(mas);
5080 if (mas_is_start(mas))
5081 goto retry;
5082
5083 if (mas_is_ptr(mas)) {
5084 if (!mas->index) {
5085 mas->last = 0;
5086 } else {
5087 mas->index = 1;
5088 mas->last = ULONG_MAX;
5089 }
5090 return entry;
5091 }
5092
5093 if (mas_is_none(mas)) {
5094 mas->index = 0;
5095 mas->last = ULONG_MAX;
5096 }
5097
5098 return entry;
5099 }
5100 EXPORT_SYMBOL_GPL(mas_walk);
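
/*
 * Illustrative sketch of a read-side walk under RCU; the helper name is
 * arbitrary. On return, mas.index and mas.last hold the range of the
 * returned entry (or of the NULL range if nothing is stored there).
 */
static void __maybe_unused mt_example_walk(struct maple_tree *mt,
					   unsigned long index, void **entry)
{
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	*entry = mas_walk(&mas);
	rcu_read_unlock();
}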
5101
5102 static inline bool mas_rewind_node(struct ma_state *mas)
5103 {
5104 unsigned char slot;
5105
5106 do {
5107 if (mte_is_root(mas->node)) {
5108 slot = mas->offset;
5109 if (!slot)
5110 return false;
5111 } else {
5112 mas_ascend(mas);
5113 slot = mas->offset;
5114 }
5115 } while (!slot);
5116
5117 mas->offset = --slot;
5118 return true;
5119 }
5120
5121 /*
5122 * mas_skip_node() - Internal function. Skip over a node.
5123 * @mas: The maple state.
5124 *
5125 * Return: true if there is another node, false otherwise.
5126 */
5127 static inline bool mas_skip_node(struct ma_state *mas)
5128 {
5129 if (mas_is_err(mas))
5130 return false;
5131
5132 do {
5133 if (mte_is_root(mas->node)) {
5134 if (mas->offset >= mas_data_end(mas)) {
5135 mas_set_err(mas, -EBUSY);
5136 return false;
5137 }
5138 } else {
5139 mas_ascend(mas);
5140 }
5141 } while (mas->offset >= mas_data_end(mas));
5142
5143 mas->offset++;
5144 return true;
5145 }
5146
5147 /*
5148 * mas_awalk() - Allocation walk. Search from low address to high for a gap of
5149 * @size.
5150 * @mas: The maple state
5151 * @size: The size of the gap required
5152 *
5153 * Search between @mas->index and @mas->last for a gap of @size.
5154 */
5155 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5156 {
5157 struct maple_enode *last = NULL;
5158
5159 /*
5160 * There are 4 options:
5161 * go to child (descend)
5162 * go back to parent (ascend)
5163 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5164 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5165 */
5166 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5167 if (last == mas->node)
5168 mas_skip_node(mas);
5169 else
5170 last = mas->node;
5171 }
5172 }
5173
5174 /*
5175 * mas_fill_gap() - Fill a located gap with @entry.
5176 * @mas: The maple state
5177 * @entry: The value to store
5178 * @slot: The offset into the node to store the @entry
5179 * @size: The size of the entry
5180 * @index: Set on return to the start of the filled gap
5181 */
5182 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5183 unsigned char slot, unsigned long size, unsigned long *index)
5184 {
5185 MA_WR_STATE(wr_mas, mas, entry);
5186 unsigned char pslot = mte_parent_slot(mas->node);
5187 struct maple_enode *mn = mas->node;
5188 unsigned long *pivots;
5189 enum maple_type ptype;
5190 /*
5191 * mas->index is the start address of the search, which may no
5192 * longer be needed.
5193 * mas->last is the end address of the search.
5194 */
5195
5196 *index = mas->index;
5197 mas->last = mas->index + size - 1;
5198
5199 /*
5200 * It is possible that using mas->max and mas->min to correctly
5201 * calculate the index and last will cause an issue in the gap
5202 * calculation, so fix the ma_state here
5203 */
5204 mas_ascend(mas);
5205 ptype = mte_node_type(mas->node);
5206 pivots = ma_pivots(mas_mn(mas), ptype);
5207 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5208 mas->min = mas_safe_min(mas, pivots, pslot);
5209 mas->node = mn;
5210 mas->offset = slot;
5211 mas_wr_store_entry(&wr_mas);
5212 }
5213
5214 /*
5215 * mas_sparse_area() - Internal function. Return upper or lower limit when
5216 * searching for a gap in an empty tree.
5217 * @mas: The maple state
5218 * @min: the minimum range
5219 * @max: The maximum range
5220 * @size: The size of the gap
* @fwd: Searching forward or back
*
* Return: 0 on success, -EBUSY if the requested size does not fit.
*/
5223 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5224 unsigned long max, unsigned long size, bool fwd)
5225 {
5226 if (!unlikely(mas_is_none(mas)) && min == 0) {
5227 min++;
5228 /*
5229 * min has been increased, so recheck that a gap of the
5230 * requested size still fits.
5231 */
5232 if (min > max || max - min + 1 < size)
5233 return -EBUSY;
5234 }
5235 /* mas_is_ptr: an entry occupies index 0 */
5236
5237 if (fwd) {
5238 mas->index = min;
5239 mas->last = min + size - 1;
5240 } else {
5241 mas->last = max;
5242 mas->index = max - size + 1;
5243 }
5244 return 0;
5245 }
5246
5247 /*
5248 * mas_empty_area() - Get the lowest address within the range that is
5249 * sufficient for the size requested.
5250 * @mas: The maple state
5251 * @min: The lowest value of the range
5252 * @max: The highest value of the range
* @size: The size needed
*
* Return: 0 on success, -EINVAL on invalid request, -EBUSY if no gap is found.
*/
5255 int mas_empty_area(struct ma_state *mas, unsigned long min,
5256 unsigned long max, unsigned long size)
5257 {
5258 unsigned char offset;
5259 unsigned long *pivots;
5260 enum maple_type mt;
5261
5262 if (min > max)
5263 return -EINVAL;
5264
5265 if (size == 0 || max - min < size - 1)
5266 return -EINVAL;
5267
5268 if (mas_is_start(mas))
5269 mas_start(mas);
5270 else if (mas->offset >= 2)
5271 mas->offset -= 2;
5272 else if (!mas_skip_node(mas))
5273 return -EBUSY;
5274
5275 /* Empty set */
5276 if (mas_is_none(mas) || mas_is_ptr(mas))
5277 return mas_sparse_area(mas, min, max, size, true);
5278
5279 /* The start of the window can only be within these values */
5280 mas->index = min;
5281 mas->last = max;
5282 mas_awalk(mas, size);
5283
5284 if (unlikely(mas_is_err(mas)))
5285 return xa_err(mas->node);
5286
5287 offset = mas->offset;
5288 if (unlikely(offset == MAPLE_NODE_SLOTS))
5289 return -EBUSY;
5290
5291 mt = mte_node_type(mas->node);
5292 pivots = ma_pivots(mas_mn(mas), mt);
5293 min = mas_safe_min(mas, pivots, offset);
5294 if (mas->index < min)
5295 mas->index = min;
5296 mas->last = mas->index + size - 1;
5297 return 0;
5298 }
5299 EXPORT_SYMBOL_GPL(mas_empty_area);
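
/*
 * Illustrative sketch of a gap search with arbitrary limits and size: on
 * success, mas.index holds the lowest suitable start address. The reverse
 * variant below, mas_empty_area_rev(), is used the same way but yields the
 * highest fitting range. The helper name is arbitrary.
 */
static int __maybe_unused mt_example_empty_area(struct maple_tree *mt,
						unsigned long *start)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	mas_lock(&mas);
	ret = mas_empty_area(&mas, 0, 1000, 16);	/* 16 slots in [0, 1000] */
	if (!ret)
		*start = mas.index;
	mas_unlock(&mas);
	return ret;
}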
5300
5301 /*
5302 * mas_empty_area_rev() - Get the highest address within the range that is
5303 * sufficient for the size requested.
5304 * @mas: The maple state
5305 * @min: The lowest value of the range
5306 * @max: The highest value of the range
* @size: The size needed
*
* Return: 0 on success, -EINVAL on invalid request, -EBUSY if no gap is found.
*/
5309 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5310 unsigned long max, unsigned long size)
5311 {
5312 struct maple_enode *last = mas->node;
5313
5314 if (min > max)
5315 return -EINVAL;
5316
5317 if (size == 0 || max - min < size - 1)
5318 return -EINVAL;
5319
5320 if (mas_is_start(mas)) {
5321 mas_start(mas);
5322 mas->offset = mas_data_end(mas);
5323 } else if (mas->offset >= 2) {
5324 mas->offset -= 2;
5325 } else if (!mas_rewind_node(mas)) {
5326 return -EBUSY;
5327 }
5328
5329 /* Empty set. */
5330 if (mas_is_none(mas) || mas_is_ptr(mas))
5331 return mas_sparse_area(mas, min, max, size, false);
5332
5333 /* The start of the window can only be within these values. */
5334 mas->index = min;
5335 mas->last = max;
5336
5337 while (!mas_rev_awalk(mas, size, &min, &max)) {
5338 if (last == mas->node) {
5339 if (!mas_rewind_node(mas))
5340 return -EBUSY;
5341 } else {
5342 last = mas->node;
5343 }
5344 }
5345
5346 if (mas_is_err(mas))
5347 return xa_err(mas->node);
5348
5349 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5350 return -EBUSY;
5351
5352 /* Trim the upper limit to the max. */
5353 if (max < mas->last)
5354 mas->last = max;
5355
5356 mas->index = mas->last - size + 1;
5357 return 0;
5358 }
5359 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5360
5361 static inline int mas_alloc(struct ma_state *mas, void *entry,
5362 unsigned long size, unsigned long *index)
5363 {
5364 unsigned long min;
5365
5366 mas_start(mas);
5367 if (mas_is_none(mas) || mas_is_ptr(mas)) {
5368 mas_root_expand(mas, entry);
5369 if (mas_is_err(mas))
5370 return xa_err(mas->node);
5371
5372 if (!mas->index)
5373 return mas_pivot(mas, 0);
5374 return mas_pivot(mas, 1);
5375 }
5376
5377 /* Must be walking a tree. */
5378 mas_awalk(mas, size);
5379 if (mas_is_err(mas))
5380 return xa_err(mas->node);
5381
5382 if (mas->offset == MAPLE_NODE_SLOTS)
5383 goto no_gap;
5384
5385 /*
5386 * At this point, mas->node points to the right node and we have an
5387 * offset that has a sufficient gap.
5388 */
5389 min = mas->min;
5390 if (mas->offset)
5391 min = mas_pivot(mas, mas->offset - 1) + 1;
5392
5393 if (mas_is_err(mas))
5394 return xa_err(mas->node);
5395
5396 if (mas->index < min)
5397 mas->index = min;
5398
5399 mas_fill_gap(mas, entry, mas->offset, size, index);
5400 return 0;
5401
5402 no_gap:
5403 return -EBUSY;
5404 }
5405
5406 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5407 unsigned long max, void *entry,
5408 unsigned long size, unsigned long *index)
5409 {
5410 int ret = 0;
5411
5412 ret = mas_empty_area_rev(mas, min, max, size);
5413 if (ret)
5414 return ret;
5415
5416 if (mas_is_err(mas))
5417 return xa_err(mas->node);
5418
5419 if (mas->offset == MAPLE_NODE_SLOTS)
5420 goto no_gap;
5421
5422 mas_fill_gap(mas, entry, mas->offset, size, index);
5423 return 0;
5424
5425 no_gap:
5426 return -EBUSY;
5427 }
5428
5429 /*
5430 * mte_dead_leaves() - Mark all leaves of a node as dead.
5431 * @enode: The maple encoded node
5432 * @mt: The maple tree
5433 * @slots: Pointer to the slot array
5434 *
5435 * Must hold the write lock.
5436 *
5437 * Return: The number of leaves marked as dead.
5438 */
5439 static inline
5440 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5441 void __rcu **slots)
5442 {
5443 struct maple_node *node;
5444 enum maple_type type;
5445 void *entry;
5446 int offset;
5447
5448 for (offset = 0; offset < mt_slot_count(enode); offset++) {
5449 entry = mt_slot(mt, slots, offset);
5450 type = mte_node_type(entry);
5451 node = mte_to_node(entry);
5452 /* Use both node and type to catch LE & BE metadata */
5453 if (!node || !type)
5454 break;
5455
5456 mte_set_node_dead(entry);
5457 node->type = type;
5458 rcu_assign_pointer(slots[offset], node);
5459 }
5460
5461 return offset;
5462 }
5463
5464 /**
5465 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5466 * @enode: The maple encoded node
5467 * @offset: The starting offset
5468 *
5469 * Note: This can only be used from the RCU callback context.
5470 */
5471 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5472 {
5473 struct maple_node *node, *next;
5474 void __rcu **slots = NULL;
5475
5476 next = mte_to_node(*enode);
5477 do {
5478 *enode = ma_enode_ptr(next);
5479 node = mte_to_node(*enode);
5480 slots = ma_slots(node, node->type);
5481 next = rcu_dereference_protected(slots[offset],
5482 lock_is_held(&rcu_callback_map));
5483 offset = 0;
5484 } while (!ma_is_leaf(next->type));
5485
5486 return slots;
5487 }
5488
5489 /**
5490 * mt_free_walk() - Walk & free a tree in the RCU callback context
5491 * @head: The RCU head that's within the node.
5492 *
5493 * Note: This can only be used from the RCU callback context.
5494 */
5495 static void mt_free_walk(struct rcu_head *head)
5496 {
5497 void __rcu **slots;
5498 struct maple_node *node, *start;
5499 struct maple_enode *enode;
5500 unsigned char offset;
5501 enum maple_type type;
5502
5503 node = container_of(head, struct maple_node, rcu);
5504
5505 if (ma_is_leaf(node->type))
5506 goto free_leaf;
5507
5508 start = node;
5509 enode = mt_mk_node(node, node->type);
5510 slots = mte_dead_walk(&enode, 0);
5511 node = mte_to_node(enode);
5512 do {
5513 mt_free_bulk(node->slot_len, slots);
5514 offset = node->parent_slot + 1;
5515 enode = node->piv_parent;
5516 if (mte_to_node(enode) == node)
5517 goto free_leaf;
5518
5519 type = mte_node_type(enode);
5520 slots = ma_slots(mte_to_node(enode), type);
5521 if ((offset < mt_slots[type]) &&
5522 rcu_dereference_protected(slots[offset],
5523 lock_is_held(&rcu_callback_map)))
5524 slots = mte_dead_walk(&enode, offset);
5525 node = mte_to_node(enode);
5526 } while ((node != start) || (node->slot_len < offset));
5527
5528 slots = ma_slots(node, node->type);
5529 mt_free_bulk(node->slot_len, slots);
5530
5531 free_leaf:
5532 mt_free_rcu(&node->rcu);
5533 }
5534
5535 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5536 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5537 {
5538 struct maple_node *node;
5539 struct maple_enode *next = *enode;
5540 void __rcu **slots = NULL;
5541 enum maple_type type;
5542 unsigned char next_offset = 0;
5543
5544 do {
5545 *enode = next;
5546 node = mte_to_node(*enode);
5547 type = mte_node_type(*enode);
5548 slots = ma_slots(node, type);
5549 next = mt_slot_locked(mt, slots, next_offset);
5550 if ((mte_dead_node(next)))
5551 next = mt_slot_locked(mt, slots, ++next_offset);
5552
5553 mte_set_node_dead(*enode);
5554 node->type = type;
5555 node->piv_parent = prev;
5556 node->parent_slot = offset;
5557 offset = next_offset;
5558 next_offset = 0;
5559 prev = *enode;
5560 } while (!mte_is_leaf(next));
5561
5562 return slots;
5563 }
5564
5565 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5566 bool free)
5567 {
5568 void __rcu **slots;
5569 struct maple_node *node = mte_to_node(enode);
5570 struct maple_enode *start;
5571
5572 if (mte_is_leaf(enode)) {
5573 node->type = mte_node_type(enode);
5574 goto free_leaf;
5575 }
5576
5577 start = enode;
5578 slots = mte_destroy_descend(&enode, mt, start, 0);
5579 node = mte_to_node(enode); /* Updated in the above call. */
5580 do {
5581 enum maple_type type;
5582 unsigned char offset;
5583 struct maple_enode *parent, *tmp;
5584
5585 node->slot_len = mte_dead_leaves(enode, mt, slots);
5586 if (free)
5587 mt_free_bulk(node->slot_len, slots);
5588 offset = node->parent_slot + 1;
5589 enode = node->piv_parent;
5590 if (mte_to_node(enode) == node)
5591 goto free_leaf;
5592
5593 type = mte_node_type(enode);
5594 slots = ma_slots(mte_to_node(enode), type);
5595 if (offset >= mt_slots[type])
5596 goto next;
5597
5598 tmp = mt_slot_locked(mt, slots, offset);
5599 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5600 parent = enode;
5601 enode = tmp;
5602 slots = mte_destroy_descend(&enode, mt, parent, offset);
5603 }
5604 next:
5605 node = mte_to_node(enode);
5606 } while (start != enode);
5607
5608 node = mte_to_node(enode);
5609 node->slot_len = mte_dead_leaves(enode, mt, slots);
5610 if (free)
5611 mt_free_bulk(node->slot_len, slots);
5612
5613 free_leaf:
5614 if (free)
5615 mt_free_rcu(&node->rcu);
5616 else
5617 mt_clear_meta(mt, node, node->type);
5618 }
5619
5620 /*
5621 * mte_destroy_walk() - Free a tree or sub-tree.
5622 * @enode: the encoded maple node (maple_enode) to start
5623 * @mt: the tree to free - needed for node types.
5624 *
5625 * Must hold the write lock.
5626 */
5627 static inline void mte_destroy_walk(struct maple_enode *enode,
5628 struct maple_tree *mt)
5629 {
5630 struct maple_node *node = mte_to_node(enode);
5631
5632 if (mt_in_rcu(mt)) {
5633 mt_destroy_walk(enode, mt, false);
5634 call_rcu(&node->rcu, mt_free_walk);
5635 } else {
5636 mt_destroy_walk(enode, mt, true);
5637 }
5638 }
5639
5640 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5641 {
5642 if (unlikely(mas_is_paused(wr_mas->mas)))
5643 mas_reset(wr_mas->mas);
5644
5645 if (!mas_is_start(wr_mas->mas)) {
5646 if (mas_is_none(wr_mas->mas)) {
5647 mas_reset(wr_mas->mas);
5648 } else {
5649 wr_mas->r_max = wr_mas->mas->max;
5650 wr_mas->type = mte_node_type(wr_mas->mas->node);
5651 if (mas_is_span_wr(wr_mas))
5652 mas_reset(wr_mas->mas);
5653 }
5654 }
5655 }
5656
5657 /* Interface */
5658
5659 /**
5660 * mas_store() - Store an @entry.
5661 * @mas: The maple state.
5662 * @entry: The entry to store.
5663 *
5664 * The @mas->index and @mas->last are used to set the range for the @entry.
5665 * Note: The @mas should have pre-allocated nodes to ensure there is memory to
5666 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5667 *
5668 * Return: the first entry between mas->index and mas->last or %NULL.
5669 */
5670 void *mas_store(struct ma_state *mas, void *entry)
5671 {
5672 MA_WR_STATE(wr_mas, mas, entry);
5673
5674 trace_ma_write(__func__, mas, 0, entry);
5675 #ifdef CONFIG_DEBUG_MAPLE_TREE
5676 if (MAS_WARN_ON(mas, mas->index > mas->last))
5677 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5678
5679 if (mas->index > mas->last) {
5680 mas_set_err(mas, -EINVAL);
5681 return NULL;
5682 }
5683
5684 #endif
5685
5686 /*
5687 * Storing is the same operation as insert with the added caveat that it
5688 * can overwrite entries. Although this seems simple enough, one may
5689 * want to examine what happens if a single store operation was to
5690 * overwrite multiple entries within a self-balancing B-Tree.
5691 */
5692 mas_wr_store_setup(&wr_mas);
5693 mas_wr_store_entry(&wr_mas);
5694 return wr_mas.content;
5695 }
5696 EXPORT_SYMBOL_GPL(mas_store);
5697
5698 /**
5699 * mas_store_gfp() - Store a value into the tree.
5700 * @mas: The maple state
5701 * @entry: The entry to store
5702 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5703 *
5704 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5705 * be allocated.
5706 */
5707 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5708 {
5709 MA_WR_STATE(wr_mas, mas, entry);
5710
5711 mas_wr_store_setup(&wr_mas);
5712 trace_ma_write(__func__, mas, 0, entry);
5713 retry:
5714 mas_wr_store_entry(&wr_mas);
5715 if (unlikely(mas_nomem(mas, gfp)))
5716 goto retry;
5717
5718 if (unlikely(mas_is_err(mas)))
5719 return xa_err(mas->node);
5720
5721 return 0;
5722 }
5723 EXPORT_SYMBOL_GPL(mas_store_gfp);
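
/*
 * A minimal sketch of a locked store, assuming a tree with the internal
 * spinlock: mas_nomem() drops the lock around blocking allocations, so
 * GFP_KERNEL is usable here. The helper name and range are arbitrary.
 */
static int __maybe_unused mt_example_store(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 5, 9);	/* Store over the range [5, 9]. */
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, xa_mk_value(7), GFP_KERNEL);
	mas_unlock(&mas);
	return ret;
}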
5724
5725 /**
5726 * mas_store_prealloc() - Store a value into the tree using memory
5727 * preallocated in the maple state.
5728 * @mas: The maple state
5729 * @entry: The entry to store.
5730 */
5731 void mas_store_prealloc(struct ma_state *mas, void *entry)
5732 {
5733 MA_WR_STATE(wr_mas, mas, entry);
5734
5735 mas_wr_store_setup(&wr_mas);
5736 trace_ma_write(__func__, mas, 0, entry);
5737 mas_wr_store_entry(&wr_mas);
5738 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5739 mas_destroy(mas);
5740 }
5741 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5742
5743 /**
5744 * mas_preallocate() - Preallocate enough nodes for a store operation
5745 * @mas: The maple state
5746 * @gfp: The GFP_FLAGS to use for allocations.
5747 *
5748 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5749 */
5750 int mas_preallocate(struct ma_state *mas, gfp_t gfp)
5751 {
5752 int ret;
5753
5754 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5755 mas->mas_flags |= MA_STATE_PREALLOC;
5756 if (likely(!mas_is_err(mas)))
5757 return 0;
5758
5759 mas_set_alloc_req(mas, 0);
5760 ret = xa_err(mas->node);
5761 mas_reset(mas);
5762 mas_destroy(mas);
5763 mas_reset(mas);
5764 return ret;
5765 }
5766 EXPORT_SYMBOL_GPL(mas_preallocate);
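
/*
 * Illustrative pairing of mas_preallocate() and mas_store_prealloc(),
 * assuming the caller has set the range on @mas and holds an external
 * (sleepable) tree lock, as the mm VMA tree does: once preallocation
 * succeeds, the store itself cannot fail for lack of memory.
 */
static int __maybe_unused mt_example_prealloc_store(struct ma_state *mas,
						    void *entry)
{
	int ret;

	ret = mas_preallocate(mas, GFP_KERNEL);
	if (ret)
		return ret;

	/* Consumes the preallocations and calls mas_destroy(). */
	mas_store_prealloc(mas, entry);
	return 0;
}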
5767
5768 /*
5769 * mas_destroy() - destroy a maple state.
5770 * @mas: The maple state
5771 *
5772 * Upon completion, check the left-most node and rebalance against the node to
5773 * the right if necessary. Frees any allocated nodes associated with this maple
5774 * state.
5775 */
5776 void mas_destroy(struct ma_state *mas)
5777 {
5778 struct maple_alloc *node;
5779 unsigned long total;
5780
5781 /*
5782 * When using mas_for_each() to insert an expected number of elements,
5783 * it is possible that the number inserted is less than the expected
5784 * number. To fix an invalid final node, a check is performed here to
5785 * rebalance the previous node with the final node.
5786 */
5787 if (mas->mas_flags & MA_STATE_REBALANCE) {
5788 unsigned char end;
5789
5790 mas_start(mas);
5791 mtree_range_walk(mas);
5792 end = mas_data_end(mas) + 1;
5793 if (end < mt_min_slot_count(mas->node) - 1)
5794 mas_destroy_rebalance(mas, end);
5795
5796 mas->mas_flags &= ~MA_STATE_REBALANCE;
5797 }
5798 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5799
5800 total = mas_allocated(mas);
5801 while (total) {
5802 node = mas->alloc;
5803 mas->alloc = node->slot[0];
5804 if (node->node_count > 1) {
5805 size_t count = node->node_count - 1;
5806
5807 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5808 total -= count;
5809 }
5810 kmem_cache_free(maple_node_cache, node);
5811 total--;
5812 }
5813
5814 mas->alloc = NULL;
5815 }
5816 EXPORT_SYMBOL_GPL(mas_destroy);
5817
5818 /*
5819 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5820 * @mas: The maple state
5821 * @nr_entries: The number of expected entries.
5822 *
5823 * This will attempt to pre-allocate enough nodes to store the expected number
5824 * of entries. The allocations will occur using the bulk allocator interface
5825 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5826 * to ensure any unused nodes are freed.
5827 *
5828 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5829 */
5830 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5831 {
5832 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5833 struct maple_enode *enode = mas->node;
5834 int nr_nodes;
5835 int ret;
5836
5837 /*
5838 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5839 * forking a process and duplicating the VMAs from one tree to a new
5840 * tree. When such a situation arises, it is known that the new tree is
5841 * not going to be used until the entire tree is populated. For
5842 * performance reasons, it is best to use a bulk load with RCU disabled.
5843 * This allows for optimistic splitting that favours the left and reuse
5844 * of nodes during the operation.
5845 */
5846
5847 /* Optimize splitting for bulk insert in-order */
5848 mas->mas_flags |= MA_STATE_BULK;
5849
5850 /*
5851 * Avoid overflow, assume a gap between each entry and a trailing null.
5852 * If this is wrong, it just means allocation can happen during
5853 * insertion of entries.
5854 */
5855 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5856 if (!mt_is_alloc(mas->tree))
5857 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5858
5859 /* Leaves; reduce slots to keep space for expansion */
5860 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5861 /* Internal nodes */
5862 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5863 /* Add working room for split (2 nodes) + new parents */
5864 mas_node_count(mas, nr_nodes + 3);
5865
5866 /* Detect if allocations run out */
5867 mas->mas_flags |= MA_STATE_PREALLOC;
5868
5869 if (!mas_is_err(mas))
5870 return 0;
5871
5872 ret = xa_err(mas->node);
5873 mas->node = enode;
5874 mas_destroy(mas);
5875 return ret;
5876
5877 }
5878 EXPORT_SYMBOL_GPL(mas_expected_entries);
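
/*
 * Example (illustrative sketch; dst_tree, nr and the src array are
 * hypothetical): bulk loading a known number of in-order entries, such as
 * when duplicating a tree. mas_destroy() must run afterwards to free unused
 * nodes and rebalance an under-filled final node.
 *
 *	MA_STATE(mas, &dst_tree, 0, 0);
 *	unsigned long i;
 *
 *	mas_lock(&mas);
 *	if (!mas_expected_entries(&mas, nr)) {
 *		for (i = 0; i < nr; i++) {
 *			mas_set_range(&mas, src[i].start, src[i].end);
 *			mas_store(&mas, src[i].entry);
 *		}
 *	}
 *	mas_destroy(&mas);
 *	mas_unlock(&mas);
 */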
5879
5880 static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
5881 void **entry)
5882 {
5883 bool was_none = mas_is_none(mas);
5884
5885 if (mas_is_none(mas) || mas_is_paused(mas))
5886 mas->node = MAS_START;
5887
5888 if (mas_is_start(mas))
5889 *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5890
5891 if (mas_is_ptr(mas)) {
5892 *entry = NULL;
5893 if (was_none && mas->index == 0) {
5894 mas->index = mas->last = 0;
5895 return true;
5896 }
5897 mas->index = 1;
5898 mas->last = ULONG_MAX;
5899 mas->node = MAS_NONE;
5900 return true;
5901 }
5902
5903 if (mas_is_none(mas))
5904 return true;
5905 return false;
5906 }
5907
5908 /**
5909 * mas_next() - Get the next entry.
5910 * @mas: The maple state
5911 * @max: The maximum index to check.
5912 *
5913 * Returns the next entry after @mas->index.
5914 * Must hold rcu_read_lock or the write lock.
5915 * Can return the zero entry.
5916 *
5917 * Return: The next entry or %NULL
5918 */
5919 void *mas_next(struct ma_state *mas, unsigned long max)
5920 {
5921 void *entry = NULL;
5922
5923 if (mas_next_setup(mas, max, &entry))
5924 return entry;
5925
5926 /* Retries on dead nodes handled by mas_next_slot */
5927 return mas_next_slot(mas, max, false);
5928 }
5929 EXPORT_SYMBOL_GPL(mas_next);
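
/*
 * Example (illustrative sketch; my_tree, start and process() are
 * hypothetical): mas_next() returns entries strictly after mas->index, so it
 * is typically used to continue from a state already positioned by
 * mas_find() or mas_walk().
 *
 *	MA_STATE(mas, &my_tree, start, start);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = mas_find(&mas, ULONG_MAX);
 *	while (entry) {
 *		process(entry, mas.index, mas.last);
 *		entry = mas_next(&mas, ULONG_MAX);
 *	}
 *	rcu_read_unlock();
 */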
5930
5931 /**
5932 * mas_next_range() - Advance the maple state to the next range
5933 * @mas: The maple state
5934 * @max: The maximum index to check.
5935 *
5936 * Sets @mas->index and @mas->last to the range.
5937 * Must hold rcu_read_lock or the write lock.
5938 * Can return the zero entry.
5939 *
5940 * Return: The next entry or %NULL
5941 */
5942 void *mas_next_range(struct ma_state *mas, unsigned long max)
5943 {
5944 void *entry = NULL;
5945
5946 if (mas_next_setup(mas, max, &entry))
5947 return entry;
5948
5949 /* Retries on dead nodes handled by mas_next_slot */
5950 return mas_next_slot(mas, max, true);
5951 }
5952 EXPORT_SYMBOL_GPL(mas_next_range);
5953
5954 /**
5955 * mt_next() - get the next value in the maple tree
5956 * @mt: The maple tree
5957 * @index: The start index
5958 * @max: The maximum index to check
5959 *
5960 * Return: The entry after @index, or %NULL if nothing is found.
5961 */
5962 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5963 {
5964 void *entry = NULL;
5965 MA_STATE(mas, mt, index, index);
5966
5967 rcu_read_lock();
5968 entry = mas_next(&mas, max);
5969 rcu_read_unlock();
5970 return entry;
5971 }
5972 EXPORT_SYMBOL_GPL(mt_next);
5973
5974 /**
5975 * mas_prev() - Get the previous entry
5976 * @mas: The maple state
5977 * @min: The minimum value to check.
5978 *
5979 * Must hold rcu_read_lock or the write lock.
5980 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on nodes
5981 * that are not searchable.
5982 *
5983 * Return: the previous value or %NULL.
5984 */
5985 void *mas_prev(struct ma_state *mas, unsigned long min)
5986 {
5987 if (mas->index <= min)
5988 goto none;
5989
5990 if (mas_is_none(mas) || mas_is_paused(mas))
5991 mas->node = MAS_START;
5992
5993 if (mas_is_start(mas)) {
5994 mas_walk(mas);
5995 if (!mas->index)
5996 goto none;
5997 }
5998
5999 if (unlikely(mas_is_ptr(mas))) {
6000 if (!mas->index)
6001 goto none;
6002 mas->index = mas->last = 0;
6003 return mas_root(mas);
6004 }
6005
6006 if (mas_is_none(mas)) {
6007 if (mas->index) {
6008 /* Walked to out-of-range pointer? */
6009 mas->index = mas->last = 0;
6010 mas->node = MAS_ROOT;
6011 return mas_root(mas);
6012 }
6013 return NULL;
6014 }
6015 return mas_prev_entry(mas, min);
6016
6017 none:
6018 mas->node = MAS_NONE;
6019 return NULL;
6020 }
6021 EXPORT_SYMBOL_GPL(mas_prev);
6022
6023 /**
6024 * mt_prev() - get the previous value in the maple tree
6025 * @mt: The maple tree
6026 * @index: The start index
6027 * @min: The minimum index to check
6028 *
6029 * Return: The entry before @index, or %NULL if nothing is found.
6030 */
6031 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
6032 {
6033 void *entry = NULL;
6034 MA_STATE(mas, mt, index, index);
6035
6036 rcu_read_lock();
6037 entry = mas_prev(&mas, min);
6038 rcu_read_unlock();
6039 return entry;
6040 }
6041 EXPORT_SYMBOL_GPL(mt_prev);
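
/*
 * Example (illustrative sketch; my_tree and index are hypothetical): the
 * mt_next()/mt_prev() wrappers take the RCU read lock internally, so a
 * one-off neighbour lookup needs no locking by the caller.
 *
 *	void *after = mt_next(&my_tree, index, ULONG_MAX);
 *	void *before = mt_prev(&my_tree, index, 0);
 */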
6042
6043 /**
6044 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
6045 * @mas: The maple state to pause
6046 *
6047 * Some users need to pause a walk and drop the lock they're holding in
6048 * order to yield to a higher priority thread or carry out an operation
6049 * on an entry. Those users should call this function before they drop
6050 * the lock. It resets the @mas to be suitable for the next iteration
6051 * of the loop after the user has reacquired the lock. If most entries
6052 * found during a walk require you to call mas_pause(), the mt_for_each()
6053 * iterator may be more appropriate.
6054 *
6055 */
6056 void mas_pause(struct ma_state *mas)
6057 {
6058 mas->node = MAS_PAUSE;
6059 }
6060 EXPORT_SYMBOL_GPL(mas_pause);
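
/*
 * Example (illustrative sketch; my_tree is hypothetical): pausing an
 * iteration so the lock can be dropped, then resuming after it is
 * reacquired.
 *
 *	MA_STATE(mas, &my_tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (!need_resched())
 *			continue;
 *		mas_pause(&mas);
 *		rcu_read_unlock();
 *		cond_resched();
 *		rcu_read_lock();
 *	}
 *	rcu_read_unlock();
 */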
6061
6062 /**
6063 * mas_find_setup() - Internal function to set up mas_find*().
6064 * @mas: The maple state
6065 * @max: The maximum index
6066 * @entry: Pointer to the entry
6067 *
6068 * Return: True if entry is the answer, false otherwise.
6069 */
6070 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
6071 void **entry)
6072 {
6073 *entry = NULL;
6074
6075 if (unlikely(mas_is_none(mas))) {
6076 if (unlikely(mas->last >= max))
6077 return true;
6078
6079 mas->index = mas->last;
6080 mas->node = MAS_START;
6081 } else if (unlikely(mas_is_paused(mas))) {
6082 if (unlikely(mas->last >= max))
6083 return true;
6084
6085 mas->node = MAS_START;
6086 mas->index = ++mas->last;
6087 } else if (unlikely(mas_is_ptr(mas)))
6088 goto ptr_out_of_range;
6089
6090 if (unlikely(mas_is_start(mas))) {
6091 /* First run or continue */
6092 if (mas->index > max)
6093 return true;
6094
6095 *entry = mas_walk(mas);
6096 if (*entry)
6097 return true;
6098
6099 }
6100
6101 if (unlikely(!mas_searchable(mas))) {
6102 if (unlikely(mas_is_ptr(mas)))
6103 goto ptr_out_of_range;
6104
6105 return true;
6106 }
6107
6108 if (mas->index == max)
6109 return true;
6110
6111 return false;
6112
6113 ptr_out_of_range:
6114 mas->node = MAS_NONE;
6115 mas->index = 1;
6116 mas->last = ULONG_MAX;
6117 return true;
6118 }
6119
6120 /**
6121 * mas_find() - On the first call, find the entry at or after mas->index up to
6122 * %max. Otherwise, find the entry after mas->index.
6123 * @mas: The maple state
6124 * @max: The maximum value to check.
6125 *
6126 * Must hold rcu_read_lock or the write lock.
6127 * If an entry exists, last and index are updated accordingly.
6128 * May set @mas->node to MAS_NONE.
6129 *
6130 * Return: The entry or %NULL.
6131 */
6132 void *mas_find(struct ma_state *mas, unsigned long max)
6133 {
6134 void *entry = NULL;
6135
6136 if (mas_find_setup(mas, max, &entry))
6137 return entry;
6138
6139 /* Retries on dead nodes handled by mas_next_slot */
6140 return mas_next_slot(mas, max, false);
6141 }
6142 EXPORT_SYMBOL_GPL(mas_find);
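
/*
 * Example (illustrative sketch; my_tree is hypothetical): mas_find() is what
 * the mas_for_each() iterator in maple_tree.h expands to, making it the
 * usual way to visit every entry in a range.
 *
 *	MA_STATE(mas, &my_tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		pr_debug("entry %p at [%lx, %lx]\n", entry, mas.index, mas.last);
 *	}
 *	rcu_read_unlock();
 */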
6143
6144 /**
6145 * mas_find_range() - On the first call, find the entry at or after
6146 * mas->index up to %max. Otherwise, advance to the next slot after mas->index.
6147 * @mas: The maple state
6148 * @max: The maximum value to check.
6149 *
6150 * Must hold rcu_read_lock or the write lock.
6151 * If an entry exists, last and index are updated accordingly.
6152 * May set @mas->node to MAS_NONE.
6153 *
6154 * Return: The entry or %NULL.
6155 */
6156 void *mas_find_range(struct ma_state *mas, unsigned long max)
6157 {
6158 void *entry;
6159
6160 if (mas_find_setup(mas, max, &entry))
6161 return entry;
6162
6163 /* Retries on dead nodes handled by mas_next_slot */
6164 return mas_next_slot(mas, max, true);
6165 }
6166 EXPORT_SYMBOL_GPL(mas_find_range);
6167
6168 /**
6169 * mas_find_rev() - On the first call, find the first non-null entry at or
6170 * below mas->index down to %min. Otherwise, find the first non-null entry
6171 * below mas->index down to %min.
6172 * @mas: The maple state
6173 * @min: The minimum value to check.
6174 *
6175 * Must hold rcu_read_lock or the write lock.
6176 * If an entry exists, last and index are updated accordingly.
6177 * May set @mas->node to MAS_NONE.
6178 *
6179 * Return: The entry or %NULL.
6180 */
6181 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6182 {
6183 if (unlikely(mas_is_none(mas))) {
6184 if (mas->index <= min)
6185 goto none;
6186
6187 mas->last = mas->index;
6188 mas->node = MAS_START;
6189 }
6190
6191 if (unlikely(mas_is_paused(mas))) {
6192 if (unlikely(mas->index <= min)) {
6193 mas->node = MAS_NONE;
6194 return NULL;
6195 }
6196 mas->node = MAS_START;
6197 mas->last = --mas->index;
6198 }
6199
6200 if (unlikely(mas_is_start(mas))) {
6201 /* First run or continue */
6202 void *entry;
6203
6204 if (mas->index < min)
6205 return NULL;
6206
6207 entry = mas_walk(mas);
6208 if (entry)
6209 return entry;
6210 }
6211
6212 if (unlikely(!mas_searchable(mas))) {
6213 if (mas_is_ptr(mas))
6214 goto none;
6215
6216 if (mas_is_none(mas)) {
6217 /*
6218 * Walked to the location, and there was nothing so the
6219 * previous location is 0.
6220 */
6221 mas->last = mas->index = 0;
6222 mas->node = MAS_ROOT;
6223 return mas_root(mas);
6224 }
6225 }
6226
6227 if (mas->index < min)
6228 return NULL;
6229
6230 /* Retries on dead nodes handled by mas_prev_entry */
6231 return mas_prev_entry(mas, min);
6232
6233 none:
6234 mas->node = MAS_NONE;
6235 return NULL;
6236 }
6237 EXPORT_SYMBOL_GPL(mas_find_rev);
6238
6239 /**
6240 * mas_erase() - Find the range in which index resides and erase the entire
6241 * range.
6242 * @mas: The maple state
6243 *
6244 * Must hold the write lock.
6245 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6246 * erases that range.
6247 *
6248 * Return: the entry that was erased or %NULL; @mas->index and @mas->last are updated.
6249 */
6250 void *mas_erase(struct ma_state *mas)
6251 {
6252 void *entry;
6253 MA_WR_STATE(wr_mas, mas, NULL);
6254
6255 if (mas_is_none(mas) || mas_is_paused(mas))
6256 mas->node = MAS_START;
6257
6258 /* Retry unnecessary when holding the write lock. */
6259 entry = mas_state_walk(mas);
6260 if (!entry)
6261 return NULL;
6262
6263 write_retry:
6264 /* Must reset to ensure spanning writes of last slot are detected */
6265 mas_reset(mas);
6266 mas_wr_store_setup(&wr_mas);
6267 mas_wr_store_entry(&wr_mas);
6268 if (mas_nomem(mas, GFP_KERNEL))
6269 goto write_retry;
6270
6271 return entry;
6272 }
6273 EXPORT_SYMBOL_GPL(mas_erase);
6274
6275 /**
6276 * mas_nomem() - Check if there was an error allocating and do the allocation
6277 * if necessary. If there are allocations, then free them.
6278 * @mas: The maple state
6279 * @gfp: The GFP_FLAGS to use for allocations
6280 * Return: true on allocation, false otherwise.
6281 */
6282 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6283 __must_hold(mas->tree->lock)
6284 {
6285 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6286 mas_destroy(mas);
6287 return false;
6288 }
6289
6290 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6291 mtree_unlock(mas->tree);
6292 mas_alloc_nodes(mas, gfp);
6293 mtree_lock(mas->tree);
6294 } else {
6295 mas_alloc_nodes(mas, gfp);
6296 }
6297
6298 if (!mas_allocated(mas))
6299 return false;
6300
6301 mas->node = MAS_START;
6302 return true;
6303 }
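
/*
 * Example (illustrative sketch): mas_nomem() implements the retry idiom used
 * by the functions in this file (see mas_erase() above and
 * mtree_store_range() below): attempt the write, and on -ENOMEM let
 * mas_nomem() drop the lock, allocate, relock and signal a retry.
 *
 *	mtree_lock(mt);
 *	retry:
 *	mas_wr_store_entry(&wr_mas);
 *	if (mas_nomem(&mas, GFP_KERNEL))
 *		goto retry;
 *	mtree_unlock(mt);
 */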
6304
6305 void __init maple_tree_init(void)
6306 {
6307 maple_node_cache = kmem_cache_create("maple_node",
6308 sizeof(struct maple_node), sizeof(struct maple_node),
6309 SLAB_PANIC, NULL);
6310 }
6311
6312 /**
6313 * mtree_load() - Load a value stored in a maple tree
6314 * @mt: The maple tree
6315 * @index: The index to load
6316 *
6317 * Return: the entry or %NULL
6318 */
6319 void *mtree_load(struct maple_tree *mt, unsigned long index)
6320 {
6321 MA_STATE(mas, mt, index, index);
6322 void *entry;
6323
6324 trace_ma_read(__func__, &mas);
6325 rcu_read_lock();
6326 retry:
6327 entry = mas_start(&mas);
6328 if (unlikely(mas_is_none(&mas)))
6329 goto unlock;
6330
6331 if (unlikely(mas_is_ptr(&mas))) {
6332 if (index)
6333 entry = NULL;
6334
6335 goto unlock;
6336 }
6337
6338 entry = mtree_lookup_walk(&mas);
6339 if (!entry && unlikely(mas_is_start(&mas)))
6340 goto retry;
6341 unlock:
6342 rcu_read_unlock();
6343 if (xa_is_zero(entry))
6344 return NULL;
6345
6346 return entry;
6347 }
6348 EXPORT_SYMBOL(mtree_load);
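
/*
 * Example (illustrative sketch; my_tree and index are hypothetical):
 * mtree_load() handles RCU locking itself, so a lookup is a single call.
 *
 *	void *entry = mtree_load(&my_tree, index);
 *
 *	if (!entry)
 *		return -ENOENT;
 */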
6349
6350 /**
6351 * mtree_store_range() - Store an entry at a given range.
6352 * @mt: The maple tree
6353 * @index: The start of the range
6354 * @last: The end of the range
6355 * @entry: The entry to store
6356 * @gfp: The GFP_FLAGS to use for allocations
6357 *
6358 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6359 * be allocated.
6360 */
6361 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6362 unsigned long last, void *entry, gfp_t gfp)
6363 {
6364 MA_STATE(mas, mt, index, last);
6365 MA_WR_STATE(wr_mas, &mas, entry);
6366
6367 trace_ma_write(__func__, &mas, 0, entry);
6368 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6369 return -EINVAL;
6370
6371 if (index > last)
6372 return -EINVAL;
6373
6374 mtree_lock(mt);
6375 retry:
6376 mas_wr_store_entry(&wr_mas);
6377 if (mas_nomem(&mas, gfp))
6378 goto retry;
6379
6380 mtree_unlock(mt);
6381 if (mas_is_err(&mas))
6382 return xa_err(mas.node);
6383
6384 return 0;
6385 }
6386 EXPORT_SYMBOL(mtree_store_range);
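
/*
 * Example (illustrative sketch; my_tree is hypothetical): storing a value
 * entry over the range [10, 19]. A single store replaces whatever the range
 * previously contained.
 *
 *	int ret = mtree_store_range(&my_tree, 10, 19, xa_mk_value(42),
 *				    GFP_KERNEL);
 *
 *	if (ret)
 *		return ret;
 */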
6387
6388 /**
6389 * mtree_store() - Store an entry at a given index.
6390 * @mt: The maple tree
6391 * @index: The index to store the value
6392 * @entry: The entry to store
6393 * @gfp: The GFP_FLAGS to use for allocations
6394 *
6395 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6396 * be allocated.
6397 */
6398 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6399 gfp_t gfp)
6400 {
6401 return mtree_store_range(mt, index, index, entry, gfp);
6402 }
6403 EXPORT_SYMBOL(mtree_store);
6404
6405 /**
6406 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6407 * @mt: The maple tree
6408 * @first: The start of the range
6409 * @last: The end of the range
6410 * @entry: The entry to store
6411 * @gfp: The GFP_FLAGS to use for allocations.
6412 *
6413 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6414 * request, -ENOMEM if memory could not be allocated.
6415 */
6416 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6417 unsigned long last, void *entry, gfp_t gfp)
6418 {
6419 MA_STATE(ms, mt, first, last);
6420
6421 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6422 return -EINVAL;
6423
6424 if (first > last)
6425 return -EINVAL;
6426
6427 mtree_lock(mt);
6428 retry:
6429 mas_insert(&ms, entry);
6430 if (mas_nomem(&ms, gfp))
6431 goto retry;
6432
6433 mtree_unlock(mt);
6434 if (mas_is_err(&ms))
6435 return xa_err(ms.node);
6436
6437 return 0;
6438 }
6439 EXPORT_SYMBOL(mtree_insert_range);
6440
6441 /**
6442 * mtree_insert() - Insert an entry at a given index if there is no value.
6443 * @mt: The maple tree
6444 * @index: The index to store the value
6445 * @entry: The entry to store
6446 * @gfp: The GFP_FLAGS to use for allocations.
6447 *
6448 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6449 * request, -ENOMEM if memory could not be allocated.
6450 */
6451 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6452 gfp_t gfp)
6453 {
6454 return mtree_insert_range(mt, index, index, entry, gfp);
6455 }
6456 EXPORT_SYMBOL(mtree_insert);
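
/*
 * Example (illustrative sketch; my_tree, index and my_entry are
 * hypothetical): unlike mtree_store(), mtree_insert() fails instead of
 * overwriting an existing entry.
 *
 *	int ret = mtree_insert(&my_tree, index, my_entry, GFP_KERNEL);
 *
 *	if (ret == -EEXIST)
 *		pr_debug("index %lu is already occupied\n", index);
 */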
6457
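/*
 * mtree_alloc_range() - Find an empty range of @size searching upwards from
 * @min, store @entry there and return the start of the range in @startp.
 * @mt: The maple tree (must be an allocation tree)
 * @startp: Pointer to the location to store the start of the range
 * @entry: The entry to store
 * @size: The size of the requested range
 * @min: The minimum index to start searching from
 * @max: The maximum index the range may occupy
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success or a negative errno on failure (e.g. -EINVAL on an
 * invalid request, -ENOMEM if memory could not be allocated).
 */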
6458 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6459 void *entry, unsigned long size, unsigned long min,
6460 unsigned long max, gfp_t gfp)
6461 {
6462 int ret = 0;
6463
6464 MA_STATE(mas, mt, min, min);
6465 if (!mt_is_alloc(mt))
6466 return -EINVAL;
6467
6468 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6469 return -EINVAL;
6470
6471 if (min > max)
6472 return -EINVAL;
6473
6474 if (max < size)
6475 return -EINVAL;
6476
6477 if (!size)
6478 return -EINVAL;
6479
6480 mtree_lock(mt);
6481 retry:
6482 mas.offset = 0;
6483 mas.index = min;
6484 mas.last = max - size + 1;
6485 ret = mas_alloc(&mas, entry, size, startp);
6486 if (mas_nomem(&mas, gfp))
6487 goto retry;
6488
6489 mtree_unlock(mt);
6490 return ret;
6491 }
6492 EXPORT_SYMBOL(mtree_alloc_range);
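
/*
 * Example (illustrative sketch; assumes my_tree was initialised as an
 * allocation tree, e.g. mt_init_flags(&my_tree, MT_FLAGS_ALLOC_RANGE)):
 * allocate a free range of 16 indices between 0 and 1000 and store my_entry
 * there.
 *
 *	unsigned long start;
 *	int ret = mtree_alloc_range(&my_tree, &start, my_entry, 16, 0, 1000,
 *				    GFP_KERNEL);
 *
 *	if (!ret)
 *		pr_debug("allocated [%lu, %lu]\n", start, start + 15);
 */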
6493
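/*
 * mtree_alloc_rrange() - Like mtree_alloc_range(), but search for the empty
 * range in reverse, preferring the highest fit at or below @max.
 * @mt: The maple tree (must be an allocation tree)
 * @startp: Pointer to the location to store the start of the range
 * @entry: The entry to store
 * @size: The size of the requested range
 * @min: The minimum index of the range
 * @max: The maximum index the range may occupy
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success or a negative errno on failure.
 */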
6494 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6495 void *entry, unsigned long size, unsigned long min,
6496 unsigned long max, gfp_t gfp)
6497 {
6498 int ret = 0;
6499
6500 MA_STATE(mas, mt, min, max - size + 1);
6501 if (!mt_is_alloc(mt))
6502 return -EINVAL;
6503
6504 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6505 return -EINVAL;
6506
6507 if (min > max)
6508 return -EINVAL;
6509
6510 if (max < size - 1)
6511 return -EINVAL;
6512
6513 if (!size)
6514 return -EINVAL;
6515
6516 mtree_lock(mt);
6517 retry:
6518 ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6519 if (mas_nomem(&mas, gfp))
6520 goto retry;
6521
6522 mtree_unlock(mt);
6523 return ret;
6524 }
6525 EXPORT_SYMBOL(mtree_alloc_rrange);
6526
6527 /**
6528 * mtree_erase() - Find an index and erase the entire range.
6529 * @mt: The maple tree
6530 * @index: The index to erase
6531 *
6532 * Erasing is the same as a walk to an entry then a store of a NULL to that
6533 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6534 *
6535 * Return: The entry stored at the @index or %NULL
6536 */
6537 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6538 {
6539 void *entry = NULL;
6540
6541 MA_STATE(mas, mt, index, index);
6542 trace_ma_op(__func__, &mas);
6543
6544 mtree_lock(mt);
6545 entry = mas_erase(&mas);
6546 mtree_unlock(mt);
6547
6548 return entry;
6549 }
6550 EXPORT_SYMBOL(mtree_erase);
6551
6552 /**
6553 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6554 * @mt: The maple tree
6555 *
6556 * Note: Does not handle locking.
6557 */
6558 void __mt_destroy(struct maple_tree *mt)
6559 {
6560 void *root = mt_root_locked(mt);
6561
6562 rcu_assign_pointer(mt->ma_root, NULL);
6563 if (xa_is_node(root))
6564 mte_destroy_walk(root, mt);
6565
6566 mt->ma_flags = 0;
6567 }
6568 EXPORT_SYMBOL_GPL(__mt_destroy);
6569
6570 /**
6571 * mtree_destroy() - Destroy a maple tree
6572 * @mt: The maple tree
6573 *
6574 * Frees all resources used by the tree. Handles locking.
6575 */
6576 void mtree_destroy(struct maple_tree *mt)
6577 {
6578 mtree_lock(mt);
6579 __mt_destroy(mt);
6580 mtree_unlock(mt);
6581 }
6582 EXPORT_SYMBOL(mtree_destroy);
6583
6584 /**
6585 * mt_find() - Search from the start up until an entry is found.
6586 * @mt: The maple tree
6587 * @index: Pointer which contains the start location of the search
6588 * @max: The maximum value to check
6589 *
6590 * Handles locking. @index will be incremented to one beyond the range.
6591 *
6592 * Return: The entry at or after the @index or %NULL
6593 */
6594 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6595 {
6596 MA_STATE(mas, mt, *index, *index);
6597 void *entry;
6598 #ifdef CONFIG_DEBUG_MAPLE_TREE
6599 unsigned long copy = *index;
6600 #endif
6601
6602 trace_ma_read(__func__, &mas);
6603
6604 if ((*index) > max)
6605 return NULL;
6606
6607 rcu_read_lock();
6608 retry:
6609 entry = mas_state_walk(&mas);
6610 if (mas_is_start(&mas))
6611 goto retry;
6612
6613 if (unlikely(xa_is_zero(entry)))
6614 entry = NULL;
6615
6616 if (entry)
6617 goto unlock;
6618
6619 while (mas_searchable(&mas) && (mas.last < max)) {
6620 entry = mas_next_entry(&mas, max);
6621 if (likely(entry && !xa_is_zero(entry)))
6622 break;
6623 }
6624
6625 if (unlikely(xa_is_zero(entry)))
6626 entry = NULL;
6627 unlock:
6628 rcu_read_unlock();
6629 if (likely(entry)) {
6630 *index = mas.last + 1;
6631 #ifdef CONFIG_DEBUG_MAPLE_TREE
6632 if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6633 pr_err("index not increased! %lx <= %lx\n",
6634 *index, copy);
6635 #endif
6636 }
6637
6638 return entry;
6639 }
6640 EXPORT_SYMBOL(mt_find);
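
/*
 * Example (illustrative sketch; my_tree is hypothetical): mt_find() advances
 * @index past the returned range, which is what the mt_for_each() iterator
 * in maple_tree.h builds on: one mt_find(), then mt_find_after() to
 * continue.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mt_for_each(&my_tree, entry, index, ULONG_MAX) {
 *		pr_debug("entry %p, next search starts at %lu\n", entry, index);
 *	}
 */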
6641
6642 /**
6643 * mt_find_after() - Search from @index up until an entry is found.
6644 * @mt: The maple tree
6645 * @index: Pointer which contains the start location of the search
6646 * @max: The maximum value to check
6647 *
6648 * Handles locking; returns %NULL immediately if @index has wrapped to 0.
6649 *
6650 * Return: The entry at or after the @index or %NULL
6651 */
6652 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6653 unsigned long max)
6654 {
6655 if (!(*index))
6656 return NULL;
6657
6658 return mt_find(mt, index, max);
6659 }
6660 EXPORT_SYMBOL(mt_find_after);
6661
6662 #ifdef CONFIG_DEBUG_MAPLE_TREE
6663 atomic_t maple_tree_tests_run;
6664 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6665 atomic_t maple_tree_tests_passed;
6666 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6667
6668 #ifndef __KERNEL__
6669 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6670 void mt_set_non_kernel(unsigned int val)
6671 {
6672 kmem_cache_set_non_kernel(maple_node_cache, val);
6673 }
6674
6675 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6676 unsigned long mt_get_alloc_size(void)
6677 {
6678 return kmem_cache_get_alloc(maple_node_cache);
6679 }
6680
6681 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6682 void mt_zero_nr_tallocated(void)
6683 {
6684 kmem_cache_zero_nr_tallocated(maple_node_cache);
6685 }
6686
6687 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6688 unsigned int mt_nr_tallocated(void)
6689 {
6690 return kmem_cache_nr_tallocated(maple_node_cache);
6691 }
6692
6693 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6694 unsigned int mt_nr_allocated(void)
6695 {
6696 return kmem_cache_nr_allocated(maple_node_cache);
6697 }
6698
6699 /*
6700 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6701 * @mas: The maple state
6702 * @index: The index to restore in @mas.
6703 *
6704 * Used in test code.
6705 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6706 */
6707 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6708 {
6709 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6710 return 0;
6711
6712 if (likely(!mte_dead_node(mas->node)))
6713 return 0;
6714
6715 mas_rewalk(mas, index);
6716 return 1;
6717 }
6718
6719 void mt_cache_shrink(void)
6720 {
6721 }
6722 #else
6723 /*
6724 * mt_cache_shrink() - For testing, don't use this.
6725 *
6726 * Certain testcases can trigger an OOM when combined with other memory
6727 * debugging configuration options. This function is used to reduce the
6728 * possibility of an out-of-memory event due to kmem_cache objects remaining
6729 * around for longer than usual.
6730 */
6731 void mt_cache_shrink(void)
6732 {
6733 kmem_cache_shrink(maple_node_cache);
6734
6735 }
6736 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6737
6738 #endif /* not defined __KERNEL__ */
6739 /*
6740 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6741 * @mas: The maple state
6742 * @offset: The offset into the slot array to fetch.
6743 *
6744 * Return: The entry stored at @offset.
6745 */
6746 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6747 unsigned char offset)
6748 {
6749 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6750 offset);
6751 }
6752
6753
6754 /*
6755 * mas_first_entry() - Go to the first leaf and find the first entry.
6756 * @mas: the maple state.
6757 * @mn: the maple node to start the descent from.
6758 * @limit: the maximum index to check.
6759 * @mt: the node type of @mn.
6760 * Sets mas->offset to the offset of the entry, mas->index to the range minimum.
6761 *
6762 * Return: The first entry or MAS_NONE.
6763 */
6764 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6765 unsigned long limit, enum maple_type mt)
6766
6767 {
6768 unsigned long max;
6769 unsigned long *pivots;
6770 void __rcu **slots;
6771 void *entry = NULL;
6772
6773 mas->index = mas->min;
6774 if (mas->index > limit)
6775 goto none;
6776
6777 max = mas->max;
6778 mas->offset = 0;
6779 while (likely(!ma_is_leaf(mt))) {
6780 MAS_WARN_ON(mas, mte_dead_node(mas->node));
6781 slots = ma_slots(mn, mt);
6782 entry = mas_slot(mas, slots, 0);
6783 pivots = ma_pivots(mn, mt);
6784 if (unlikely(ma_dead_node(mn)))
6785 return NULL;
6786 max = pivots[0];
6787 mas->node = entry;
6788 mn = mas_mn(mas);
6789 mt = mte_node_type(mas->node);
6790 }
6791 MAS_WARN_ON(mas, mte_dead_node(mas->node));
6792
6793 mas->max = max;
6794 slots = ma_slots(mn, mt);
6795 entry = mas_slot(mas, slots, 0);
6796 if (unlikely(ma_dead_node(mn)))
6797 return NULL;
6798
6799 /* Slot 0 or 1 must be set */
6800 if (mas->index > limit)
6801 goto none;
6802
6803 if (likely(entry))
6804 return entry;
6805
6806 mas->offset = 1;
6807 entry = mas_slot(mas, slots, 1);
6808 pivots = ma_pivots(mn, mt);
6809 if (unlikely(ma_dead_node(mn)))
6810 return NULL;
6811
6812 mas->index = pivots[0] + 1;
6813 if (mas->index > limit)
6814 goto none;
6815
6816 if (likely(entry))
6817 return entry;
6818
6819 none:
6820 if (likely(!ma_dead_node(mn)))
6821 mas->node = MAS_NONE;
6822 return NULL;
6823 }
6824
6825 /* Depth first search, post-order */
6826 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6827 {
6828
6829 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6830 unsigned long p_min, p_max;
6831
6832 mas_next_node(mas, mas_mn(mas), max);
6833 if (!mas_is_none(mas))
6834 return;
6835
6836 if (mte_is_root(mn))
6837 return;
6838
6839 mas->node = mn;
6840 mas_ascend(mas);
6841 do {
6842 p = mas->node;
6843 p_min = mas->min;
6844 p_max = mas->max;
6845 mas_prev_node(mas, 0);
6846 } while (!mas_is_none(mas));
6847
6848 mas->node = p;
6849 mas->max = p_max;
6850 mas->min = p_min;
6851 }
6852
6853 /* Tree validations */
6854 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6855 unsigned long min, unsigned long max, unsigned int depth,
6856 enum mt_dump_format format);
6857 static void mt_dump_range(unsigned long min, unsigned long max,
6858 unsigned int depth, enum mt_dump_format format)
6859 {
6860 static const char spaces[] = " ";
6861
6862 switch (format) {
6863 case mt_dump_hex:
6864 if (min == max)
6865 pr_info("%.*s%lx: ", depth * 2, spaces, min);
6866 else
6867 pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
6868 break;
6869 default:
6870 case mt_dump_dec:
6871 if (min == max)
6872 pr_info("%.*s%lu: ", depth * 2, spaces, min);
6873 else
6874 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6875 }
6876 }
6877
6878 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6879 unsigned int depth, enum mt_dump_format format)
6880 {
6881 mt_dump_range(min, max, depth, format);
6882
6883 if (xa_is_value(entry))
6884 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6885 xa_to_value(entry), entry);
6886 else if (xa_is_zero(entry))
6887 pr_cont("zero (%ld)\n", xa_to_internal(entry));
6888 else if (mt_is_reserved(entry))
6889 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6890 else
6891 pr_cont("%p\n", entry);
6892 }
6893
6894 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6895 unsigned long min, unsigned long max, unsigned int depth,
6896 enum mt_dump_format format)
6897 {
6898 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6899 bool leaf = mte_is_leaf(entry);
6900 unsigned long first = min;
6901 int i;
6902
6903 pr_cont(" contents: ");
6904 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
6905 switch (format) {
6906 case mt_dump_hex:
6907 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
6908 break;
6909 default:
6910 case mt_dump_dec:
6911 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6912 }
6913 }
6914 pr_cont("%p\n", node->slot[i]);
6915 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6916 unsigned long last = max;
6917
6918 if (i < (MAPLE_RANGE64_SLOTS - 1))
6919 last = node->pivot[i];
6920 else if (!node->slot[i] && max != mt_node_max(entry))
6921 break;
6922 if (last == 0 && i > 0)
6923 break;
6924 if (leaf)
6925 mt_dump_entry(mt_slot(mt, node->slot, i),
6926 first, last, depth + 1, format);
6927 else if (node->slot[i])
6928 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6929 first, last, depth + 1, format);
6930
6931 if (last == max)
6932 break;
6933 if (last > max) {
6934 switch (format) {
6935 case mt_dump_hex:
6936 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
6937 node, last, max, i);
6938 break;
6939 default:
6940 case mt_dump_dec:
6941 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6942 node, last, max, i);
6943 }
6944 }
6945 first = last + 1;
6946 }
6947 }
6948
6949 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6950 unsigned long min, unsigned long max, unsigned int depth,
6951 enum mt_dump_format format)
6952 {
6953 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6954 bool leaf = mte_is_leaf(entry);
6955 unsigned long first = min;
6956 int i;
6957
6958 pr_cont(" contents: ");
6959 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6960 pr_cont("%lu ", node->gap[i]);
6961 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6962 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6963 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6964 pr_cont("%p\n", node->slot[i]);
6965 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6966 unsigned long last = max;
6967
6968 if (i < (MAPLE_ARANGE64_SLOTS - 1))
6969 last = node->pivot[i];
6970 else if (!node->slot[i])
6971 break;
6972 if (last == 0 && i > 0)
6973 break;
6974 if (leaf)
6975 mt_dump_entry(mt_slot(mt, node->slot, i),
6976 first, last, depth + 1, format);
6977 else if (node->slot[i])
6978 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6979 first, last, depth + 1, format);
6980
6981 if (last == max)
6982 break;
6983 if (last > max) {
6984 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6985 node, last, max, i);
6986 break;
6987 }
6988 first = last + 1;
6989 }
6990 }
6991
6992 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6993 unsigned long min, unsigned long max, unsigned int depth,
6994 enum mt_dump_format format)
6995 {
6996 struct maple_node *node = mte_to_node(entry);
6997 unsigned int type = mte_node_type(entry);
6998 unsigned int i;
6999
7000 mt_dump_range(min, max, depth, format);
7001
7002 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
7003 node ? node->parent : NULL);
7004 switch (type) {
7005 case maple_dense:
7006 pr_cont("\n");
7007 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
7008 if (min + i > max)
7009 pr_cont("OUT OF RANGE: ");
7010 mt_dump_entry(mt_slot(mt, node->slot, i),
7011 min + i, min + i, depth, format);
7012 }
7013 break;
7014 case maple_leaf_64:
7015 case maple_range_64:
7016 mt_dump_range64(mt, entry, min, max, depth, format);
7017 break;
7018 case maple_arange_64:
7019 mt_dump_arange64(mt, entry, min, max, depth, format);
7020 break;
7021
7022 default:
7023 pr_cont(" UNKNOWN TYPE\n");
7024 }
7025 }
7026
7027 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
7028 {
7029 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
7030
7031 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
7032 mt, mt->ma_flags, mt_height(mt), entry);
7033 if (!xa_is_node(entry))
7034 mt_dump_entry(entry, 0, 0, 0, format);
7035 else if (entry)
7036 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
7037 }
7038 EXPORT_SYMBOL_GPL(mt_dump);
7039
7040 /*
7041 * Calculate the maximum gap in a node and check if that's what is reported in
7042 * the parent (unless root).
7043 */
7044 static void mas_validate_gaps(struct ma_state *mas)
7045 {
7046 struct maple_enode *mte = mas->node;
7047 struct maple_node *p_mn;
7048 unsigned long gap = 0, max_gap = 0;
7049 unsigned long p_end, p_start = mas->min;
7050 unsigned char p_slot;
7051 unsigned long *gaps = NULL;
7052 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
7053 int i;
7054
7055 if (ma_is_dense(mte_node_type(mte))) {
7056 for (i = 0; i < mt_slot_count(mte); i++) {
7057 if (mas_get_slot(mas, i)) {
7058 if (gap > max_gap)
7059 max_gap = gap;
7060 gap = 0;
7061 continue;
7062 }
7063 gap++;
7064 }
7065 goto counted;
7066 }
7067
7068 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
7069 for (i = 0; i < mt_slot_count(mte); i++) {
7070 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
7071
7072 if (!gaps) {
7073 if (mas_get_slot(mas, i)) {
7074 gap = 0;
7075 goto not_empty;
7076 }
7077
7078 gap += p_end - p_start + 1;
7079 } else {
7080 void *entry = mas_get_slot(mas, i);
7081
7082 gap = gaps[i];
7083 if (!entry) {
7084 if (gap != p_end - p_start + 1) {
7085 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
7086 mas_mn(mas), i,
7087 mas_get_slot(mas, i), gap,
7088 p_end, p_start);
7089 mt_dump(mas->tree, mt_dump_hex);
7090
7091 MT_BUG_ON(mas->tree,
7092 gap != p_end - p_start + 1);
7093 }
7094 } else {
7095 if (gap > p_end - p_start + 1) {
7096 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7097 mas_mn(mas), i, gap, p_end, p_start,
7098 p_end - p_start + 1);
7099 MT_BUG_ON(mas->tree,
7100 gap > p_end - p_start + 1);
7101 }
7102 }
7103 }
7104
7105 if (gap > max_gap)
7106 max_gap = gap;
7107 not_empty:
7108 p_start = p_end + 1;
7109 if (p_end >= mas->max)
7110 break;
7111 }
7112
7113 counted:
7114 if (mte_is_root(mte))
7115 return;
7116
7117 p_slot = mte_parent_slot(mas->node);
7118 p_mn = mte_parent(mte);
7119 MT_BUG_ON(mas->tree, max_gap > mas->max);
7120 if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7121 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7122 mt_dump(mas->tree, mt_dump_hex);
7123 }
7124
7125 MT_BUG_ON(mas->tree,
7126 ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap);
7127 }
7128
7129 static void mas_validate_parent_slot(struct ma_state *mas)
7130 {
7131 struct maple_node *parent;
7132 struct maple_enode *node;
7133 enum maple_type p_type;
7134 unsigned char p_slot;
7135 void __rcu **slots;
7136 int i;
7137
7138 if (mte_is_root(mas->node))
7139 return;
7140
7141 p_slot = mte_parent_slot(mas->node);
7142 p_type = mas_parent_type(mas, mas->node);
7143 parent = mte_parent(mas->node);
7144 slots = ma_slots(parent, p_type);
7145 MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7146
7147 /* Check prev/next parent slot for duplicate node entry */
7148
7149 for (i = 0; i < mt_slots[p_type]; i++) {
7150 node = mas_slot(mas, slots, i);
7151 if (i == p_slot) {
7152 if (node != mas->node)
7153 pr_err("parent %p[%u] does not have %p\n",
7154 parent, i, mas_mn(mas));
7155 MT_BUG_ON(mas->tree, node != mas->node);
7156 } else if (node == mas->node) {
7157 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7158 mas_mn(mas), parent, i, p_slot);
7159 MT_BUG_ON(mas->tree, node == mas->node);
7160 }
7161 }
7162 }
7163
7164 static void mas_validate_child_slot(struct ma_state *mas)
7165 {
7166 enum maple_type type = mte_node_type(mas->node);
7167 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7168 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7169 struct maple_enode *child;
7170 unsigned char i;
7171
7172 if (mte_is_leaf(mas->node))
7173 return;
7174
7175 for (i = 0; i < mt_slots[type]; i++) {
7176 child = mas_slot(mas, slots, i);
7177 if (!pivots[i] || pivots[i] == mas->max)
7178 break;
7179
7180 if (!child)
7181 break;
7182
7183 if (mte_parent_slot(child) != i) {
7184 pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7185 mas_mn(mas), i, mte_to_node(child),
7186 mte_parent_slot(child));
7187 MT_BUG_ON(mas->tree, 1);
7188 }
7189
7190 if (mte_parent(child) != mte_to_node(mas->node)) {
7191 pr_err("child %p has parent %p not %p\n",
7192 mte_to_node(child), mte_parent(child),
7193 mte_to_node(mas->node));
7194 MT_BUG_ON(mas->tree, 1);
7195 }
7196 }
7197 }
7198
7199 /*
7200 * Validate all pivots are within mas->min and mas->max.
7201 */
7202 static void mas_validate_limits(struct ma_state *mas)
7203 {
7204 int i;
7205 unsigned long prev_piv = 0;
7206 enum maple_type type = mte_node_type(mas->node);
7207 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7208 unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7209
7210 /* all limits are fine here. */
7211 if (mte_is_root(mas->node))
7212 return;
7213
7214 for (i = 0; i < mt_slots[type]; i++) {
7215 unsigned long piv;
7216
7217 piv = mas_safe_pivot(mas, pivots, i, type);
7218
7219 if (!piv && (i != 0))
7220 break;
7221
7222 if (!mte_is_leaf(mas->node)) {
7223 void *entry = mas_slot(mas, slots, i);
7224
7225 if (!entry)
7226 pr_err("%p[%u] cannot be null\n",
7227 mas_mn(mas), i);
7228
7229 MT_BUG_ON(mas->tree, !entry);
7230 }
7231
7232 if (prev_piv > piv) {
7233 pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7234 mas_mn(mas), i, piv, prev_piv);
7235 MAS_WARN_ON(mas, piv < prev_piv);
7236 }
7237
7238 if (piv < mas->min) {
7239 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7240 piv, mas->min);
7241 MAS_WARN_ON(mas, piv < mas->min);
7242 }
7243 if (piv > mas->max) {
7244 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7245 piv, mas->max);
7246 MAS_WARN_ON(mas, piv > mas->max);
7247 }
7248 prev_piv = piv;
7249 if (piv == mas->max)
7250 break;
7251 }
7252 for (i += 1; i < mt_slots[type]; i++) {
7253 void *entry = mas_slot(mas, slots, i);
7254
7255 if (entry && (i != mt_slots[type] - 1)) {
7256 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7257 i, entry);
7258 MT_BUG_ON(mas->tree, entry != NULL);
7259 }
7260
7261 if (i < mt_pivots[type]) {
7262 unsigned long piv = pivots[i];
7263
7264 if (!piv)
7265 continue;
7266
7267 pr_err("%p[%u] should not have piv %lu\n",
7268 mas_mn(mas), i, piv);
7269 MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
7270 }
7271 }
7272 }
7273
7274 static void mt_validate_nulls(struct maple_tree *mt)
7275 {
7276 void *entry, *last = (void *)1;
7277 unsigned char offset = 0;
7278 void __rcu **slots;
7279 MA_STATE(mas, mt, 0, 0);
7280
7281 mas_start(&mas);
7282 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7283 return;
7284
7285 while (!mte_is_leaf(mas.node))
7286 mas_descend(&mas);
7287
7288 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7289 do {
7290 entry = mas_slot(&mas, slots, offset);
7291 if (!last && !entry) {
7292 pr_err("Sequential nulls end at %p[%u]\n",
7293 mas_mn(&mas), offset);
7294 }
7295 MT_BUG_ON(mt, !last && !entry);
7296 last = entry;
7297 if (offset == mas_data_end(&mas)) {
7298 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7299 if (mas_is_none(&mas))
7300 return;
7301 offset = 0;
7302 slots = ma_slots(mte_to_node(mas.node),
7303 mte_node_type(mas.node));
7304 } else {
7305 offset++;
7306 }
7307
7308 } while (!mas_is_none(&mas));
7309 }
7310
7311 /*
7312 * validate a maple tree by checking:
7313 * 1. The limits (pivots are within mas->min to mas->max)
7314 * 2. The gap is correctly set in the parents
7315 */
7316 void mt_validate(struct maple_tree *mt)
7317 {
7318 unsigned char end;
7319
7320 MA_STATE(mas, mt, 0, 0);
7321 rcu_read_lock();
7322 mas_start(&mas);
7323 if (!mas_searchable(&mas))
7324 goto done;
7325
7326 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7327 while (!mas_is_none(&mas)) {
7328 MAS_WARN_ON(&mas, mte_dead_node(mas.node));
7329 if (!mte_is_root(mas.node)) {
7330 end = mas_data_end(&mas);
7331 if (MAS_WARN_ON(&mas,
7332 (end < mt_min_slot_count(mas.node)) &&
7333 (mas.max != ULONG_MAX))) {
7334 pr_err("Invalid size %u of %p\n", end,
7335 mas_mn(&mas));
7336 }
7337 }
7338 mas_validate_parent_slot(&mas);
7339 mas_validate_child_slot(&mas);
7340 mas_validate_limits(&mas);
7341 if (mt_is_alloc(mt))
7342 mas_validate_gaps(&mas);
7343 mas_dfs_postorder(&mas, ULONG_MAX);
7344 }
7345 mt_validate_nulls(mt);
7346 done:
7347 rcu_read_unlock();
7348
7349 }
7350 EXPORT_SYMBOL_GPL(mt_validate);
7351
7352 void mas_dump(const struct ma_state *mas)
7353 {
7354 pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
7355 if (mas_is_none(mas))
7356 pr_err("(MAS_NONE) ");
7357 else if (mas_is_ptr(mas))
7358 pr_err("(MAS_ROOT) ");
7359 else if (mas_is_start(mas))
7360 pr_err("(MAS_START) ");
7361 else if (mas_is_paused(mas))
7362 pr_err("(MAS_PAUSED) ");
7363
7364 pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
7365 pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
7366 mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
7367 if (mas->index > mas->last)
7368 pr_err("Check index & last\n");
7369 }
7370 EXPORT_SYMBOL_GPL(mas_dump);
7371
7372 void mas_wr_dump(const struct ma_wr_state *wr_mas)
7373 {
7374 pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
7375 wr_mas->node, wr_mas->r_min, wr_mas->r_max);
7376 pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
7377 wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
7378 wr_mas->end_piv);
7379 }
7380 EXPORT_SYMBOL_GPL(mas_wr_dump);
7381
7382 #endif /* CONFIG_DEBUG_MAPLE_TREE */