/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>
  (C) 2002  David Woodhouse <dwmw2@infradead.org>
  (C) 2012  Michel Lespinasse <walken@google.com>


  linux/include/linux/rbtree_augmented.h
*/

#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H

#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>

/*
 * Please note - only struct rb_augment_callbacks and the prototypes for
 * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
 * The rest are implementation details you are not expected to depend on.
 *
 * See Documentation/core-api/rbtree.rst for documentation and samples.
 */

struct rb_augment_callbacks {
	void (*propagate)(struct rb_node *node, struct rb_node *stop);
	void (*copy)(struct rb_node *old, struct rb_node *new);
	void (*rotate)(struct rb_node *old, struct rb_node *new);
};

extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));

/*
 * Fixup the rbtree and update the augmented information when rebalancing.
 *
 * On insertion, the user must update the augmented information on the path
 * leading to the inserted node, then call rb_link_node() as usual and
 * rb_insert_augmented() instead of the usual rb_insert_color() call.
 * If rb_insert_augmented() rebalances the rbtree, it will callback into
 * a user provided function to update the augmented information on the
 * affected subtrees.
 */
static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
		    const struct rb_augment_callbacks *augment)
{
	__rb_insert_augmented(node, root, augment->rotate);
}

static inline void
rb_insert_augmented_cached(struct rb_node *node,
			   struct rb_root_cached *root, bool newleft,
			   const struct rb_augment_callbacks *augment)
{
	if (newleft)
		root->rb_leftmost = node;
	rb_insert_augmented(node, &root->rb_root, augment);
}

static __always_inline struct rb_node *
rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree,
			bool (*less)(struct rb_node *, const struct rb_node *),
			const struct rb_augment_callbacks *augment)
{
	struct rb_node **link = &tree->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		if (less(node, parent)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(node, parent, link);
	augment->propagate(parent, NULL); /* suboptimal */
	rb_insert_augmented_cached(node, tree, leftmost, augment);

	return leftmost ? node : NULL;
}
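/*
 * Illustrative sketch (not part of this header's API): one way a user could
 * follow the insertion protocol described above.  The names used here
 * (struct mynode, key, val, subtree_max, my_callbacks, my_insert) are
 * hypothetical; my_callbacks would typically be declared with the
 * RB_DECLARE_CALLBACKS_MAX() template below.  The augmented data on the
 * search path is updated by hand before linking, then rb_insert_augmented()
 * rebalances and fixes up the rest through the callbacks.
 *
 *	struct mynode {
 *		struct rb_node rb;
 *		unsigned long key;
 *		unsigned long val;
 *		unsigned long subtree_max;
 *	};
 *
 *	static void my_insert(struct mynode *new, struct rb_root *root)
 *	{
 *		struct rb_node **link = &root->rb_node, *parent = NULL;
 *
 *		new->subtree_max = new->val;
 *		while (*link) {
 *			struct mynode *p = rb_entry(*link, struct mynode, rb);
 *
 *			parent = *link;
 *			if (p->subtree_max < new->val)
 *				p->subtree_max = new->val;
 *			if (new->key < p->key)
 *				link = &parent->rb_left;
 *			else
 *				link = &parent->rb_right;
 *		}
 *
 *		rb_link_node(&new->rb, parent, link);
 *		rb_insert_augmented(&new->rb, root, &my_callbacks);
 *	}
 */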
/*
 * Template for declaring augmented rbtree callbacks (generic case)
 *
 * RBSTATIC:    'static' or empty
 * RBNAME:      name of the rb_augment_callbacks structure
 * RBSTRUCT:    struct type of the tree nodes
 * RBFIELD:     name of struct rb_node field within RBSTRUCT
 * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
 * RBCOMPUTE:   name of function that recomputes the RBAUGMENTED data
 */

#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,				\
			     RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE)	\
static inline void							\
RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
{									\
	while (rb != stop) {						\
		RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD);	\
		if (RBCOMPUTE(node, true))				\
			break;						\
		rb = rb_parent(&node->RBFIELD);				\
	}								\
}									\
static inline void							\
RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)		\
{									\
	RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD);		\
	RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD);		\
	new->RBAUGMENTED = old->RBAUGMENTED;				\
}									\
static void								\
RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)	\
{									\
	RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD);		\
	RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD);		\
	new->RBAUGMENTED = old->RBAUGMENTED;				\
	RBCOMPUTE(old, false);						\
}									\
RBSTATIC const struct rb_augment_callbacks RBNAME = {			\
	.propagate = RBNAME ## _propagate,				\
	.copy = RBNAME ## _copy,					\
	.rotate = RBNAME ## _rotate					\
};

/*
 * Template for declaring augmented rbtree callbacks,
 * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
 *
 * RBSTATIC:    'static' or empty
 * RBNAME:      name of the rb_augment_callbacks structure
 * RBSTRUCT:    struct type of the tree nodes
 * RBFIELD:     name of struct rb_node field within RBSTRUCT
 * RBTYPE:      type of the RBAUGMENTED field
 * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
 * RBCOMPUTE:   name of function that returns the per-node RBTYPE scalar
 */

#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD,	\
				 RBTYPE, RBAUGMENTED, RBCOMPUTE)	\
static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit)	\
{									\
	RBSTRUCT *child;						\
	RBTYPE max = RBCOMPUTE(node);					\
	if (node->RBFIELD.rb_left) {					\
		child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
		if (child->RBAUGMENTED > max)				\
			max = child->RBAUGMENTED;			\
	}								\
	if (node->RBFIELD.rb_right) {					\
		child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
		if (child->RBAUGMENTED > max)				\
			max = child->RBAUGMENTED;			\
	}								\
	if (exit && node->RBAUGMENTED == max)				\
		return true;						\
	node->RBAUGMENTED = max;					\
	return false;							\
}									\
RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,					\
		     RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
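/*
 * Illustrative sketch (not part of this header's API): declaring the
 * callbacks for the hypothetical struct mynode from the insertion sample
 * above, where subtree_max caches the maximum val over each subtree.
 * my_compute_val and my_callbacks are made-up names.
 *
 *	static inline unsigned long my_compute_val(struct mynode *node)
 *	{
 *		return node->val;
 *	}
 *
 *	RB_DECLARE_CALLBACKS_MAX(static, my_callbacks,
 *				 struct mynode, rb,
 *				 unsigned long, subtree_max, my_compute_val)
 */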
#define RB_RED		0
#define RB_BLACK	1

#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))

#define __rb_color(pc)     ((pc) & 1)
#define __rb_is_black(pc)  __rb_color(pc)
#define __rb_is_red(pc)    (!__rb_color(pc))
#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)

static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
	rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
}

static inline void rb_set_parent_color(struct rb_node *rb,
				       struct rb_node *p, int color)
{
	rb->__rb_parent_color = (unsigned long)p + color;
}

static inline void
__rb_change_child(struct rb_node *old, struct rb_node *new,
		  struct rb_node *parent, struct rb_root *root)
{
	if (parent) {
		if (parent->rb_left == old)
			WRITE_ONCE(parent->rb_left, new);
		else
			WRITE_ONCE(parent->rb_right, new);
	} else
		WRITE_ONCE(root->rb_node, new);
}

static inline void
__rb_change_child_rcu(struct rb_node *old, struct rb_node *new,
		      struct rb_node *parent, struct rb_root *root)
{
	if (parent) {
		if (parent->rb_left == old)
			rcu_assign_pointer(parent->rb_left, new);
		else
			rcu_assign_pointer(parent->rb_right, new);
	} else
		rcu_assign_pointer(root->rb_node, new);
}

extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));

static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		     const struct rb_augment_callbacks *augment)
{
	struct rb_node *child = node->rb_right;
	struct rb_node *tmp = node->rb_left;
	struct rb_node *parent, *rebalance;
	unsigned long pc;

	if (!tmp) {
		/*
		 * Case 1: node to erase has no more than 1 child (easy!)
		 *
		 * Note that if there is one child it must be red due to 5)
		 * and node must be black due to 4). We adjust colors locally
		 * so as to bypass __rb_erase_color() later on.
		 */
		pc = node->__rb_parent_color;
		parent = __rb_parent(pc);
		__rb_change_child(node, child, parent, root);
		if (child) {
			child->__rb_parent_color = pc;
			rebalance = NULL;
		} else
			rebalance = __rb_is_black(pc) ? parent : NULL;
		tmp = parent;
	} else if (!child) {
		/* Still case 1, but this time the child is node->rb_left */
		tmp->__rb_parent_color = pc = node->__rb_parent_color;
		parent = __rb_parent(pc);
		__rb_change_child(node, tmp, parent, root);
		rebalance = NULL;
		tmp = parent;
	} else {
		struct rb_node *successor = child, *child2;

		tmp = child->rb_left;
		if (!tmp) {
			/*
			 * Case 2: node's successor is its right child
			 *
			 *    (n)          (s)
			 *    / \          / \
			 *  (x) (s)  ->  (x) (c)
			 *        \
			 *        (c)
			 */
			parent = successor;
			child2 = successor->rb_right;

			augment->copy(node, successor);
		} else {
			/*
			 * Case 3: node's successor is leftmost under
			 * node's right child subtree
			 *
			 *    (n)          (s)
			 *    / \          / \
			 *  (x) (y)  ->  (x) (y)
			 *      /            /
			 *    (p)          (p)
			 *    /            /
			 *  (s)          (c)
			 *    \
			 *    (c)
			 */
			do {
				parent = successor;
				successor = tmp;
				tmp = tmp->rb_left;
			} while (tmp);
			child2 = successor->rb_right;
			WRITE_ONCE(parent->rb_left, child2);
			WRITE_ONCE(successor->rb_right, child);
			rb_set_parent(child, successor);

			augment->copy(node, successor);
			augment->propagate(parent, successor);
		}

		tmp = node->rb_left;
		WRITE_ONCE(successor->rb_left, tmp);
		rb_set_parent(tmp, successor);

		pc = node->__rb_parent_color;
		tmp = __rb_parent(pc);
		__rb_change_child(node, successor, tmp, root);

		if (child2) {
			rb_set_parent_color(child2, parent, RB_BLACK);
			rebalance = NULL;
		} else {
			rebalance = rb_is_black(successor) ? parent : NULL;
		}
		successor->__rb_parent_color = pc;
		tmp = successor;
	}

	augment->propagate(tmp, NULL);
	return rebalance;
}

static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		   const struct rb_augment_callbacks *augment)
{
	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
	if (rebalance)
		__rb_erase_color(rebalance, root, augment->rotate);
}

static __always_inline void
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
			  const struct rb_augment_callbacks *augment)
{
	if (root->rb_leftmost == node)
		root->rb_leftmost = rb_next(node);
	rb_erase_augmented(node, &root->rb_root, augment);
}
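/*
 * Illustrative sketch (not part of this header's API): erasing a node from
 * the hypothetical augmented tree used in the samples above.  No manual
 * fixup of subtree_max is needed before the call; rb_erase_augmented()
 * keeps the augmented data consistent through the callbacks while it
 * unlinks and rebalances.  my_erase and my_callbacks are made-up names.
 *
 *	static void my_erase(struct mynode *node, struct rb_root *root)
 *	{
 *		rb_erase_augmented(&node->rb, root, &my_callbacks);
 *	}
 */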
#endif	/* _LINUX_RBTREE_AUGMENTED_H */