// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2019-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

static unsigned int tests_run;
static unsigned int tests_passed;

static const unsigned int order_limit =
		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
	// kills the test-suite :-(
	// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();
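	/* a stale xa_state sees an internal entry until it restarts its walk */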

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
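	/* orders above 0 need CONFIG_XARRAY_MULTI; cap the multi-index loop at order 8 */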

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
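			/* the multi-index entry plus the entry at 'next' */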
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark_3(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	XA_STATE(xas, xa, 0x41);
	void *entry;
	int count = 0;

	xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
	xa_set_mark(xa, 0x41, XA_MARK_0);

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		count++;
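		/* the mark set at 0x41 is visible through the whole order-2 entry */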
		XA_BUG_ON(xa, entry != xa_mk_index(0x40));
	}
	XA_BUG_ON(xa, count != 1);
	rcu_read_unlock();
	xa_destroy(xa);
#endif
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
	check_xa_mark_3(xa);
}

static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
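		/* erasing the topmost entry should shrink back to the old head */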
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

static noinline void check_insert(struct xarray *xa)
{
	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(xa, 1UL << i);

		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(xa, (1UL << i) - 1);
	}

	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
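	/* the cmpxchg above removed index 5, so the last insert succeeded */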
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_cmpxchg_order(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	void *FIVE = xa_mk_value(5);
	unsigned int i, order = 3;

	XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));

	/* Check entry FIVE has the order saved */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);

	/* Check all the tied indexes have the same entry and order */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Ensure that nothing is stored at index '1 << order' */
	XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);

	/*
	 * Additionally, keep the node information and the order at
	 * '1 << order'
	 */
	XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	/* Conditionally replace FIVE entry at index '0' with NULL */
	XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);

	/* Verify the order is lost at FIVE (and old) entries */
	XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);

	/* Verify the order and entries are lost in all the tied indexes */
	for (i = 0; i < (1 << order); i++) {
		XA_BUG_ON(xa, xa_load(xa, i) != NULL);
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
	}

	/* Verify node and order are kept at '1 << order' */
	for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
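		/* the second multi-index entry is untouched by the cmpxchg at 0 */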
		XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
		XA_BUG_ON(xa, xa_get_order(xa, i) != order);
	}

	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
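	/* erasing the reserved slot returns NULL rather than the ZERO entry */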
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);
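	/* [min, max) is the naturally aligned range the order-sized entry covers */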

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
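	/* stay below BITS_PER_LONG so 1UL << order cannot overflow */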

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
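			/* overwrite the order-i entry with an order-j entry */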
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

#ifdef CONFIG_XARRAY_MULTI
/* mimics page cache __filemap_add_folio() */
static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
						  unsigned long index,
						  unsigned int order,
						  void *p)
{
	XA_STATE(xas, xa, index);
	unsigned int nrpages = 1UL << order;

	/* users are responsible for index alignment to the order when adding */
	XA_BUG_ON(xa, index & (nrpages - 1));

	xas_set_order(&xas, index, order);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, p);
		xas_unlock_irq(&xas);
		/*
		 * In our selftest case the only failure we can expect is for
		 * there not to be enough memory as we're not mimicking the
		 * entire page cache, so verify that's the only error we can run
		 * into here. The xas_nomem() which follows will fix that
		 * condition for us so we can carry on with the loop.
		 */
		XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, xa_load(xa, index) != p);
}

/* mimics page_cache_delete() */
static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
							unsigned long index,
							unsigned int order)
{
	XA_STATE(xas, xa, index);

	xas_set_order(&xas, index, order);
	xas_store(&xas, NULL);
	xas_init_marks(&xas);
}

static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
						     unsigned long index,
						     unsigned int order)
{
	xa_lock_irq(xa);
	check_xa_multi_store_adv_del_entry(xa, index, order);
	xa_unlock_irq(xa);
}

/* mimics page cache filemap_get_entry() */
static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *p;
	static unsigned int loops = 0;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	p = xas_load(&xas);
	if (xas_retry(&xas, p))
		goto repeat;
	rcu_read_unlock();

	/*
	 * This is not part of the page cache; this selftest is pretty
	 * aggressive and does not want to trust the xarray API but rather
	 * test it, and for order 20 (4 GiB block size) we can loop over
	 * more than a million entries, which can cause a soft lockup.
	 * Proper page cache APIs loop over the proper order, so when using
	 * a larger order we skip shared entries.
	 */
	if (++loops % XA_CHECK_SCHED == 0)
		schedule();

	return p;
}

static unsigned long some_val = 0xdeadbeef;
static unsigned long some_val_2 = 0xdeaddead;

/* mimics the page cache usage */
static noinline void check_xa_multi_store_adv(struct xarray *xa,
					      unsigned long pos,
					      unsigned int order)
{
	unsigned int nrpages = 1UL << order;
	unsigned long index, base, next_index, next_next_index;
	unsigned int i;

	index = pos >> PAGE_SHIFT;
	base = round_down(index, nrpages);
	next_index = round_down(base + nrpages, nrpages);
	next_next_index = round_down(next_index + nrpages, nrpages);

	check_xa_multi_store_adv_add(xa, base, order, &some_val);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);

	XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);

	/* Use order 0 for the next item */
	check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
	XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);

	/* Remove the next item */
	check_xa_multi_store_adv_delete(xa, next_index, 0);

	/* Now use order for a new pointer */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	check_xa_multi_store_adv_delete(xa, base, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* starting fresh again */

	/* let's test some holes now */

	/* hole at base and next_next */
	check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);

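	/* the next_next range must still be a hole */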
	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);

	check_xa_multi_store_adv_delete(xa, next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* hole at base and next */

	check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);

	for (i = 0; i < nrpages; i++)
		XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);

	check_xa_multi_store_adv_delete(xa, next_next_index, order);
	XA_BUG_ON(xa, !xa_empty(xa));
}
#endif

static noinline void check_multi_store_advanced(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned long end = ULONG_MAX/2;
	unsigned long pos, i;

	/*
	 * About 117 million tests below.
	 */
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
		for (i = 0; i < max_order; i++) {
			check_xa_multi_store_adv(xa, pos, i);
			check_xa_multi_store_adv(xa, pos + 157, i);
		}
	}
#endif
}

static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
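	/* allocating NULL stores the internal ZERO entry, so the id is in use */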
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}
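
/*
 * Store a (possibly multi-index) entry over [start, start + 2^order)
 * with the advanced API, using xas_for_each_conflict() to count the
 * entries already occupying that range; @present is the number of
 * conflicts we expect to see.
 */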

static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}
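
/*
 * A multi-index entry occupies every index in its range, so xa_find()
 * starting anywhere inside the range returns that entry, while
 * xa_find_after() steps over the remainder of the range instead of
 * returning the same entry again.
 */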

static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;

	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, index != multi);
	index = multi + 1;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(multi));
	XA_BUG_ON(xa, (index < multi) || (index >= next));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(next));
	XA_BUG_ON(xa, index != next);
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

	xa_erase_index(xa, multi);
	xa_erase_index(xa, next);
	xa_erase_index(xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_multi_find_3(struct xarray *xa)
{
	unsigned int order;

	for (order = 5; order < order_limit; order++) {
		unsigned long index = 1UL << (order - 5);

		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
		xa_erase_index(xa, 0);
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
							XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}
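
/*
 * An iteration over marked entries whose starting index is already
 * beyond the maximum must find nothing and leave the xa_state
 * restartable (xa_node == XAS_RESTART).
 */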

static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static noinline void check_find_4(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));

	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

	xa_erase_index(xa, ULONG_MAX);
}

static noinline void check_find(struct xarray *xa)
{
	unsigned i;

	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_find_4(xa);

	for (i = 2; i < 10; i++)
		check_multi_find_1(xa, i);
	check_multi_find_2(xa);
	check_multi_find_3(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
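
/*
 * xas_pause() drops the iterator's position in the tree so the lock
 * can be released and the walk resumed later; pausing after every
 * entry must visit exactly the same entries as an unpaused walk,
 * including multi-index entries of every order.
 */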

static noinline void check_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
	unsigned long index = 1;
	unsigned int count = 0;

	for (order = 0; order < order_limit; order++) {
		XA_BUG_ON(xa, xa_store_order(xa, index, order,
					xa_mk_index(index), GFP_KERNEL));
		index += 1UL << order;
	}

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	count = 0;
	xas_set(&xas, 0);
	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
		count++;
		xas_pause(&xas);
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
}
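
/*
 * The next three tests exercise stepping with xas_next()/xas_prev()
 * and xas_find() on small arrays: a single entry at index 0, a single
 * entry at ULONG_MAX (where a second find must not return the same
 * entry again), and a pair of entries at 0 and @idx.
 */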

static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_max(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xas_set(&xas, 0);
	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
	xas_pause(&xas);
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
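
/*
 * As well as stepping between the two entries, this checks that the
 * index wraps: xas_next() stepping past ULONG_MAX lands on index 0,
 * and xas_prev() stepping below 0 lands on ULONG_MAX.
 */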

static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
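
/*
 * Walk a 2^16-entry array backwards and then forwards with
 * xas_prev()/xas_next(), first fully populated and then with a large
 * hole punched in the middle, checking entry and index at each step.
 */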

static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);
	check_move_max(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}
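
/*
 * Create every slot in [index, index + 2^order) with
 * xas_create_range(), then store a distinct entry in each one,
 * retrying from scratch on memory allocation failure.
 */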

static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}
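
/*
 * Like check_create_range_1(), but one index in the range is already
 * occupied: the old entry must be returned when it is overwritten,
 * and every other slot must have been empty.
 */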

static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_5(struct xarray *xa,
		unsigned long index, unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	for (i = 0; i < order + 10; i++) {
		do {
			xas_lock(&xas);
			xas_create_range(&xas);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}

	xa_destroy(xa);
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);

		check_create_range_5(xa, 0, order);
		check_create_range_5(xa, (1U << order), order);
	}

	check_create_range_3();
}

static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}
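
/*
 * Split a single order-@order entry into entries of @new_order,
 * pre-allocating the nodes with xas_split_alloc() before taking the
 * lock and committing with xas_split(); a mark set on the original
 * entry must be carried over to every piece.
 */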

#ifdef CONFIG_XARRAY_MULTI
static void check_split_1(struct xarray *xa, unsigned long index,
				unsigned int order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;

	xa_store_order(xa, index, order, xa, GFP_KERNEL);
	xa_set_mark(xa, index, XA_MARK_1);

	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, xa, order);
	for (i = 0; i < (1 << order); i += (1 << new_order))
		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
	xas_unlock(&xas);

	for (i = 0; i < (1 << order); i++) {
		unsigned int val = index + (i & ~((1 << new_order) - 1));
		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
	}

	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	xas_set_order(&xas, index, 0);
	found = 0;
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
		found++;
		XA_BUG_ON(xa, xa_is_internal(entry));
	}
	rcu_read_unlock();
	XA_BUG_ON(xa, found != 1 << (order - new_order));

	xa_destroy(xa);
}

static noinline void check_split(struct xarray *xa)
{
	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
			check_split_1(xa, 0, order, new_order);
			check_split_1(xa, 1UL << order, order, new_order);
			check_split_1(xa, 3UL << order, order, new_order);
		}
	}
}
#else
static void check_split(struct xarray *xa) { }
#endif

static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}
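
/*
 * A minimal model of how the page cache workingset code uses
 * xas_set_update(): a node goes on the shadow_nodes list when every
 * entry it holds is a value ("shadow") entry, and comes off the list
 * as soon as that stops being true.
 */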

static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xa_delete_node(node, test_update_node);
	}
	xa_unlock(xa);
}

static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
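
/*
 * xa_get_order() reports the order of the entry which occupies an
 * index; every index covered by a multi-index entry must report the
 * order the entry was stored with, and an absent index reports 0.
 */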

static noinline void check_get_order(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (i = 0; i < 3; i++)
		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xa_store_order(xa, i << order, order,
					xa_mk_index(i << order), GFP_KERNEL);
			for (j = i << order; j < (i + 1) << order; j++)
				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
			xa_erase(xa, i << order);
		}
	}
}
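
/*
 * As above, but via the advanced API: after xas_load(),
 * xas_get_order() must report the stored order for every index in
 * the entry's range.
 */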

static noinline void check_xas_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			for (j = i << order; j < (i + 1) << order; j++) {
				xas_set_order(&xas, j, 0);
				rcu_read_lock();
				xas_load(&xas);
				XA_BUG_ON(xa, xas_get_order(&xas) != order);
				rcu_read_unlock();
			}

			xas_lock(&xas);
			xas_set_order(&xas, i << order, order);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_xas_conflict_get_order(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	void *entry;
	int only_once;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j, k;

	for (order = 0; order < max_order; order++) {
		for (i = 0; i < 10; i++) {
			xas_set_order(&xas, i << order, order);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(i));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));

			/*
			 * Ensure xas_get_order works with
			 * xas_for_each_conflict.
			 */
			j = i << order;
			for (k = 0; k < order; k++) {
				only_once = 0;
				xas_set_order(&xas, j + (1 << k), k);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			if (order < max_order - 1) {
				only_once = 0;
				xas_set_order(&xas, (i & ~1UL) << order, order + 1);
				xas_lock(&xas);
				xas_for_each_conflict(&xas, entry) {
					XA_BUG_ON(xa, entry != xa_mk_value(i));
					XA_BUG_ON(xa, xas_get_order(&xas) != order);
					only_once++;
				}
				XA_BUG_ON(xa, only_once != 1);
				xas_unlock(&xas);
			}

			xas_set_order(&xas, i << order, order);
			xas_lock(&xas);
			xas_store(&xas, NULL);
			xas_unlock(&xas);
		}
	}
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_cmpxchg_order(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_multi_store_advanced(&array);
	check_get_order(&array);
	check_xas_get_order(&array);
	check_xas_conflict_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_pause(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
	check_split(&array);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_DESCRIPTION("XArray API test module");
MODULE_LICENSE("GPL");