// SPDX-License-Identifier: GPL-2.0-only
/*
 * multiorder.c: Multi-order radix tree entry testing
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <pthread.h>

#include "test.h"

static int item_insert_order(struct xarray *xa, unsigned long index,
			unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	struct item *item = item_create(index, order);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	if (!xas_error(&xas))
		return 0;

	free(item);
	return xas_error(&xas);
}

void multiorder_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j, err;

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	printv(1, "Multiorder iteration test\n");

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(xa, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		/*
		 * Find the first entry a walk starting at index j should
		 * return: the first entry whose last covered index is >= j.
		 */
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		xas_set(&xas, j);
		xas_for_each(&xas, item, ULONG_MAX) {
			int height = order[i] / XA_CHUNK_SHIFT;
			int shift = height * XA_CHUNK_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;

			assert((xas.xa_index | mask) == (index[i] | mask));
			assert(xas.xa_node->shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(xa);
}

void multiorder_tagged_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j;

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	printv(1, "Multiorder tagged iteration test\n");

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(xa, index[i], order[i]));

	assert(!xa_marked(xa, XA_MARK_1));

	for (i = 0; i < TAG_ENTRIES; i++)
		xa_set_mark(xa, tag_index[i], XA_MARK_1);

	for (j = 0; j < 256; j++) {
		int k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
			unsigned long mask;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

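	/*
	 * tag_tagged_items() sets XA_MARK_2 on each entry that already has
	 * XA_MARK_1, so the walk below over XA_MARK_2 must find exactly the
	 * same set of entries.
	 */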
	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
				XA_MARK_2) == TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
				XA_MARK_0) == TAG_ENTRIES);
	i = 0;
	xas_set(&xas, 0);
	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
		assert(xas.xa_index == tag_index[i]);
		i++;
	}
	assert(i == TAG_ENTRIES);

	item_kill_tree(xa);
}

bool stop_iteration;

static void *creator_func(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
	struct radix_tree_root *tree = ptr;
	int i;

	for (i = 0; i < 10000; i++) {
		item_insert_order(tree, 0, order);
		item_delete_rcu(tree, 0);
	}

	stop_iteration = true;
	return NULL;
}

static void *iterator_func(void *ptr)
{
	XA_STATE(xas, ptr, 0);
	struct item *item;

	while (!stop_iteration) {
		rcu_read_lock();
		xas_for_each(&xas, item, ULONG_MAX) {
			if (xas_retry(&xas, item))
				continue;

			item_sanity(item, xas.xa_index);
		}
		rcu_read_unlock();
	}
	return NULL;
}

static void multiorder_iteration_race(struct xarray *xa)
{
	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	pthread_t worker_thread[num_threads];
	int i;

	stop_iteration = false;
	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
	for (i = 1; i < num_threads; i++)
		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);

	for (i = 0; i < num_threads; i++)
		pthread_join(worker_thread[i], NULL);

	item_kill_tree(xa);
}

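/*
 * load_creator() repeatedly inserts and deletes multi-order entries whose
 * ranges end just below 3 << RADIX_TREE_MAP_SHIFT, while the load_worker()
 * threads hammer xa_load() on the last index covered by those entries.
 * A concurrent lookup must never return an internal (sibling/retry) entry
 * to the caller.
 */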
static void *load_creator(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order;
	struct radix_tree_root *tree = ptr;
	int i;

	rcu_register_thread();
	item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
	item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
	for (i = 0; i < 10000; i++) {
		for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
			unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
						(1 << order);
			item_insert_order(tree, index, order);
			item_delete_rcu(tree, index);
		}
	}
	rcu_unregister_thread();

	stop_iteration = true;
	return NULL;
}

static void *load_worker(void *ptr)
{
	unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;

	rcu_register_thread();
	while (!stop_iteration) {
		struct item *item = xa_load(ptr, index);
		assert(!xa_is_internal(item));
	}
	rcu_unregister_thread();

	return NULL;
}

static void load_race(struct xarray *xa)
{
	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
	pthread_t worker_thread[num_threads];
	int i;

	stop_iteration = false;
	pthread_create(&worker_thread[0], NULL, &load_creator, xa);
	for (i = 1; i < num_threads; i++)
		pthread_create(&worker_thread[i], NULL, &load_worker, xa);

	for (i = 0; i < num_threads; i++)
		pthread_join(worker_thread[i], NULL);

	item_kill_tree(xa);
}

static DEFINE_XARRAY(array);

void multiorder_checks(void)
{
	multiorder_iteration(&array);
	multiorder_tagged_iteration(&array);
	multiorder_iteration_race(&array);
	load_race(&array);

	radix_tree_cpu_dead(0);
}

/*
 * main() is __weak so the full radix-tree test harness can provide its own
 * entry point; this minimal option parser runs only when multiorder is
 * built as a standalone test binary.
 */
int __weak main(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "ls:v")) != -1) {
		if (opt == 'v')
			test_verbose++;
	}

	rcu_register_thread();
	radix_tree_init();
	multiorder_checks();
	rcu_unregister_thread();
	return 0;
}