TOMOYO Linux Cross Reference
Linux/fs/bcachefs/tests.c

// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "snapshot.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

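/*
 * In-kernel unit and performance tests for the bcachefs btree code: basic
 * insert/delete/iteration unit tests, extent overwrite and snapshot filtering
 * tests, and random/sequential perf tests, all dispatched by name through
 * bch2_btree_perf_test() at the bottom of this file.
 */
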
static void delete_test_keys(struct bch_fs *c)
{
        int ret;

        ret = bch2_btree_delete_range(c, BTREE_ID_extents,
                                      SPOS(0, 0, U32_MAX),
                                      POS(0, U64_MAX),
                                      0, NULL);
        BUG_ON(ret);

        ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
                                      SPOS(0, 0, U32_MAX),
                                      POS(0, U64_MAX),
                                      0, NULL);
        BUG_ON(ret);
}

/* unit tests */

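/*
 * Most tests stage their keys at inode 0, snapshot U32_MAX, which is why
 * delete_test_keys() clears the range SPOS(0, 0, U32_MAX)..POS(0, U64_MAX)
 * in both test btrees.
 *
 * The tests lean heavily on the commit_do()/lockrestart_do() helpers:
 * roughly, these run a btree operation (plus a transaction commit, in the
 * commit_do() case) and transparently retry it if the transaction has to be
 * restarted, e.g. due to lock contention.
 */
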
static int test_delete(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k.p.snapshot = U32_MAX;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
                             BTREE_ITER_intent);

        ret = commit_do(trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(trans, &iter, &k.k_i, 0));
        bch_err_msg(c, ret, "update error");
        if (ret)
                goto err;

        pr_info("deleting once");
        ret = commit_do(trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(trans, &iter, 0));
        bch_err_msg(c, ret, "delete error (first)");
        if (ret)
                goto err;

        pr_info("deleting twice");
        ret = commit_do(trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(trans, &iter, 0));
        bch_err_msg(c, ret, "delete error (second)");
        if (ret)
                goto err;
err:
        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return ret;
}

static int test_delete_written(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k.p.snapshot = U32_MAX;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
                             BTREE_ITER_intent);

        ret = commit_do(trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(trans, &iter, &k.k_i, 0));
        bch_err_msg(c, ret, "update error");
        if (ret)
                goto err;

        bch2_trans_unlock(trans);
        bch2_journal_flush_all_pins(&c->journal);

        ret = commit_do(trans, NULL, NULL, 0,
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(trans, &iter, 0));
        bch_err_msg(c, ret, "delete error");
        if (ret)
                goto err;
err:
        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return ret;
}

static int test_iterate(struct bch_fs *c, u64 nr)
{
        u64 i;
        int ret = 0;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie ck;

                bkey_cookie_init(&ck.k_i);
                ck.k.p.offset = i;
                ck.k.p.snapshot = U32_MAX;

                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
                bch_err_msg(c, ret, "insert error");
                if (ret)
                        return ret;
        }

        pr_info("iterating forwards");
        i = 0;

        ret = bch2_trans_run(c,
                for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
                                        SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                        0, k, ({
                        BUG_ON(k.k->p.offset != i++);
                        0;
                })));
        bch_err_msg(c, ret, "error iterating forwards");
        if (ret)
                return ret;

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        ret = bch2_trans_run(c,
                for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
                                SPOS(0, U64_MAX, U32_MAX), 0, k, ({
                        BUG_ON(k.k->p.offset != --i);
                        0;
                })));
        bch_err_msg(c, ret, "error iterating backwards");
        if (ret)
                return ret;

        BUG_ON(i);
        return 0;
}

static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
        u64 i;
        int ret = 0;

        delete_test_keys(c);

        pr_info("inserting test extents");

        for (i = 0; i < nr; i += 8) {
                struct bkey_i_cookie ck;

                bkey_cookie_init(&ck.k_i);
                ck.k.p.offset = i + 8;
                ck.k.p.snapshot = U32_MAX;
                ck.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
                bch_err_msg(c, ret, "insert error");
                if (ret)
                        return ret;
        }

        pr_info("iterating forwards");
        i = 0;

        ret = bch2_trans_run(c,
                for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
                                        SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                        0, k, ({
                        BUG_ON(bkey_start_offset(k.k) != i);
                        i = k.k->p.offset;
                        0;
                })));
        bch_err_msg(c, ret, "error iterating forwards");
        if (ret)
                return ret;

        BUG_ON(i != nr);

        pr_info("iterating backwards");

        ret = bch2_trans_run(c,
                for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
                                SPOS(0, U64_MAX, U32_MAX), 0, k, ({
                        BUG_ON(k.k->p.offset != i);
                        i = bkey_start_offset(k.k);
                        0;
                })));
        bch_err_msg(c, ret, "error iterating backwards");
        if (ret)
                return ret;

        BUG_ON(i);
        return 0;
}

static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
        u64 i;
        int ret = 0;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i++) {
                struct bkey_i_cookie ck;

                bkey_cookie_init(&ck.k_i);
                ck.k.p.offset = i * 2;
                ck.k.p.snapshot = U32_MAX;

                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
                bch_err_msg(c, ret, "insert error");
                if (ret)
                        return ret;
        }

        pr_info("iterating forwards");
        i = 0;

        ret = bch2_trans_run(c,
                for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
                                        SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                        0, k, ({
                        BUG_ON(k.k->p.offset != i);
                        i += 2;
                        0;
                })));
        bch_err_msg(c, ret, "error iterating forwards");
        if (ret)
                return ret;

        BUG_ON(i != nr * 2);

        pr_info("iterating forwards by slots");
        i = 0;

        ret = bch2_trans_run(c,
                for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
                                        SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                        BTREE_ITER_slots, k, ({
                        if (i >= nr * 2)
                                break;

                        BUG_ON(k.k->p.offset != i);
                        BUG_ON(bkey_deleted(k.k) != (i & 1));

                        i++;
                        0;
                })));
        bch_err_msg(c, ret, "error iterating forwards by slots");
        return ret;
}

static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
        u64 i;
        int ret = 0;

        delete_test_keys(c);

        pr_info("inserting test keys");

        for (i = 0; i < nr; i += 16) {
                struct bkey_i_cookie ck;

                bkey_cookie_init(&ck.k_i);
                ck.k.p.offset = i + 16;
                ck.k.p.snapshot = U32_MAX;
                ck.k.size = 8;

                ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
                bch_err_msg(c, ret, "insert error");
                if (ret)
                        return ret;
        }

        pr_info("iterating forwards");
        i = 0;

        ret = bch2_trans_run(c,
                for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
                                        SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                        0, k, ({
                        BUG_ON(bkey_start_offset(k.k) != i + 8);
                        BUG_ON(k.k->size != 8);
                        i += 16;
                        0;
                })));
        bch_err_msg(c, ret, "error iterating forwards");
        if (ret)
                return ret;

        BUG_ON(i != nr);

        pr_info("iterating forwards by slots");
        i = 0;

        ret = bch2_trans_run(c,
                for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
                                        SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                        BTREE_ITER_slots, k, ({
                        if (i == nr)
                                break;
                        BUG_ON(bkey_deleted(k.k) != !(i % 16));

                        BUG_ON(bkey_start_offset(k.k) != i);
                        BUG_ON(k.k->size != 8);
                        i = k.k->p.offset;
                        0;
                })));
        bch_err_msg(c, ret, "error iterating forwards by slots");
        return ret;
}

/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
static int test_peek_end(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
                             SPOS(0, 0, U32_MAX), 0);

        lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
        BUG_ON(k.k);

        lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
        BUG_ON(k.k);

        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return 0;
}

static int test_peek_end_extents(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                             SPOS(0, 0, U32_MAX), 0);

        lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
        BUG_ON(k.k);

        lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
        BUG_ON(k.k);

        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return 0;
}

/* extent unit tests */

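/*
 * bcachefs extent keys are positioned at the *end* of the extent:
 * insert_test_extent() below encodes the extent [start, end) as
 * p.offset = end with size = end - start, and gives each test extent a
 * distinct, increasing version number.
 */
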
static u64 test_version;

static int insert_test_extent(struct bch_fs *c,
                              u64 start, u64 end)
{
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k_i.k.p.offset = end;
        k.k_i.k.p.snapshot = U32_MAX;
        k.k_i.k.size = end - start;
        k.k_i.k.version.lo = test_version++;

        ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0);
        bch_err_fn(c, ret);
        return ret;
}

static int __test_extent_overwrite(struct bch_fs *c,
                                    u64 e1_start, u64 e1_end,
                                    u64 e2_start, u64 e2_end)
{
        int ret;

        ret   = insert_test_extent(c, e1_start, e1_end) ?:
                insert_test_extent(c, e2_start, e2_end);

        delete_test_keys(c);
        return ret;
}

static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
                __test_extent_overwrite(c, 8, 64, 0, 32);
}

static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
                __test_extent_overwrite(c, 0, 64, 32, 72);
}

static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
        return __test_extent_overwrite(c, 0, 64, 32, 40);
}

static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
        return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
                __test_extent_overwrite(c, 32, 64,  0, 128) ?:
                __test_extent_overwrite(c, 32, 64, 32,  64) ?:
                __test_extent_overwrite(c, 32, 64, 32, 128);
}

static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
{
        struct bkey_i_cookie k;
        int ret;

        bkey_cookie_init(&k.k_i);
        k.k_i.k.p.inode = inum;
        k.k_i.k.p.offset = start + len;
        k.k_i.k.p.snapshot = snapid;
        k.k_i.k.size = len;

        ret = bch2_trans_do(c, NULL, NULL, 0,
                bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
                                            BTREE_UPDATE_internal_snapshot_node));
        bch_err_fn(c, ret);
        return ret;
}

static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
{
        return  insert_test_overlapping_extent(c, inum,  0, 16, U32_MAX - 2) ?: /* overwrite entire */
                insert_test_overlapping_extent(c, inum,  2,  8, U32_MAX - 2) ?:
                insert_test_overlapping_extent(c, inum,  4,  4, U32_MAX) ?:
                insert_test_overlapping_extent(c, inum, 32,  8, U32_MAX - 2) ?: /* overwrite front/back */
                insert_test_overlapping_extent(c, inum, 36,  8, U32_MAX) ?:
                insert_test_overlapping_extent(c, inum, 60,  8, U32_MAX - 2) ?:
                insert_test_overlapping_extent(c, inum, 64,  8, U32_MAX);
}

/* snapshot unit tests */

/* Test skipping over keys in unrelated snapshots: */
static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
{
        struct btree_trans *trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_i_cookie cookie;
        int ret;

        bkey_cookie_init(&cookie.k_i);
        cookie.k.p.snapshot = snapid_hi;
        ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
        if (ret)
                return ret;

        trans = bch2_trans_get(c);
        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
                             SPOS(0, 0, snapid_lo), 0);
        lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));

        BUG_ON(k.k->p.snapshot != U32_MAX);

        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return ret;
}

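/*
 * Creates two child snapshots of U32_MAX with bch2_snapshot_node_create(),
 * then uses test_snapshot_filter() to check that a key written in one of them
 * is not visible when iterating from the other: only the ancestor key at
 * snapshot U32_MAX should be returned.
 */
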
static int test_snapshots(struct bch_fs *c, u64 nr)
{
        struct bkey_i_cookie cookie;
        u32 snapids[2];
        u32 snapid_subvols[2] = { 1, 1 };
        int ret;

        bkey_cookie_init(&cookie.k_i);
        cookie.k.p.snapshot = U32_MAX;
        ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
        if (ret)
                return ret;

        ret = bch2_trans_do(c, NULL, NULL, 0,
                      bch2_snapshot_node_create(trans, U32_MAX,
                                                snapids,
                                                snapid_subvols,
                                                2));
        if (ret)
                return ret;

        if (snapids[0] > snapids[1])
                swap(snapids[0], snapids[1]);

        ret = test_snapshot_filter(c, snapids[0], snapids[1]);
        bch_err_msg(c, ret, "from test_snapshot_filter");
        return ret;
}

/* perf tests */

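/*
 * Each perf test performs nr operations against the xattrs btree; when run
 * with multiple threads, bch2_btree_perf_test() below splits nr evenly across
 * nr_threads kthreads and reports aggregate throughput.
 */
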
static u64 test_rand(void)
{
        u64 v;

        get_random_bytes(&v, sizeof(v));
        return v;
}

static int rand_insert(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bkey_i_cookie k;
        int ret = 0;
        u64 i;

        for (i = 0; i < nr; i++) {
                bkey_cookie_init(&k.k_i);
                k.k.p.offset = test_rand();
                k.k.p.snapshot = U32_MAX;

                ret = commit_do(trans, NULL, NULL, 0,
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
                if (ret)
                        break;
        }

        bch2_trans_put(trans);
        return ret;
}

static int rand_insert_multi(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bkey_i_cookie k[8];
        int ret = 0;
        unsigned j;
        u64 i;

        for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
                for (j = 0; j < ARRAY_SIZE(k); j++) {
                        bkey_cookie_init(&k[j].k_i);
                        k[j].k.p.offset = test_rand();
                        k[j].k.p.snapshot = U32_MAX;
                }

                ret = commit_do(trans, NULL, NULL, 0,
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
                if (ret)
                        break;
        }

        bch2_trans_put(trans);
        return ret;
}

static int rand_lookup(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;
        u64 i;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
                             SPOS(0, 0, U32_MAX), 0);

        for (i = 0; i < nr; i++) {
                bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));

                lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
                ret = bkey_err(k);
                if (ret)
                        break;
        }

        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return ret;
}

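/*
 * One iteration of rand_mixed(): look up a random key, and on every fourth
 * iteration (when the lookup found a key) overwrite it within the same
 * transaction.
 */
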
static int rand_mixed_trans(struct btree_trans *trans,
                            struct btree_iter *iter,
                            struct bkey_i_cookie *cookie,
                            u64 i, u64 pos)
{
        struct bkey_s_c k;
        int ret;

        bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));

        k = bch2_btree_iter_peek(iter);
        ret = bkey_err(k);
        bch_err_msg(trans->c, ret, "lookup error");
        if (ret)
                return ret;

        if (!(i & 3) && k.k) {
                bkey_cookie_init(&cookie->k_i);
                cookie->k.p = iter->pos;
                ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
        }

        return ret;
}

static int rand_mixed(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_i_cookie cookie;
        int ret = 0;
        u64 i, rand;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
                             SPOS(0, 0, U32_MAX), 0);

        for (i = 0; i < nr; i++) {
                rand = test_rand();
                ret = commit_do(trans, NULL, NULL, 0,
                        rand_mixed_trans(trans, &iter, &cookie, i, rand));
                if (ret)
                        break;
        }

        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return ret;
}

static int __do_delete(struct btree_trans *trans, struct bpos pos)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
                             BTREE_ITER_intent);
        k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (!k.k)
                goto err;

        ret = bch2_btree_delete_at(trans, &iter, 0);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static int rand_delete(struct bch_fs *c, u64 nr)
{
        struct btree_trans *trans = bch2_trans_get(c);
        int ret = 0;
        u64 i;

        for (i = 0; i < nr; i++) {
                struct bpos pos = SPOS(0, test_rand(), U32_MAX);

                ret = commit_do(trans, NULL, NULL, 0,
                        __do_delete(trans, pos));
                if (ret)
                        break;
        }

        bch2_trans_put(trans);
        return ret;
}

static int seq_insert(struct bch_fs *c, u64 nr)
{
        struct bkey_i_cookie insert;

        bkey_cookie_init(&insert.k_i);

        return bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
                                        SPOS(0, 0, U32_MAX),
                                        BTREE_ITER_slots|BTREE_ITER_intent, k,
                                        NULL, NULL, 0, ({
                        if (iter.pos.offset >= nr)
                                break;
                        insert.k.p = iter.pos;
                        bch2_trans_update(trans, &iter, &insert.k_i, 0);
                })));
}

static int seq_lookup(struct bch_fs *c, u64 nr)
{
        return bch2_trans_run(c,
                for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
                                  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                  0, k,
                0));
}

static int seq_overwrite(struct bch_fs *c, u64 nr)
{
        return bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
                                        SPOS(0, 0, U32_MAX),
                                        BTREE_ITER_intent, k,
                                        NULL, NULL, 0, ({
                        struct bkey_i_cookie u;

                        bkey_reassemble(&u.k_i, k);
                        bch2_trans_update(trans, &iter, &u.k_i, 0);
                })));
}

static int seq_delete(struct bch_fs *c, u64 nr)
{
        return bch2_btree_delete_range(c, BTREE_ID_xattrs,
                                      SPOS(0, 0, U32_MAX),
                                      POS(0, U64_MAX),
                                      0, NULL);
}

typedef int (*perf_test_fn)(struct bch_fs *, u64);

struct test_job {
        struct bch_fs                   *c;
        u64                             nr;
        unsigned                        nr_threads;
        perf_test_fn                    fn;

        atomic_t                        ready;
        wait_queue_head_t               ready_wait;

        atomic_t                        done;
        struct completion               done_completion;

        u64                             start;
        u64                             finish;
        int                             ret;
};

static int btree_perf_test_thread(void *data)
{
        struct test_job *j = data;
        int ret;

        if (atomic_dec_and_test(&j->ready)) {
                wake_up(&j->ready_wait);
                j->start = sched_clock();
        } else {
                wait_event(j->ready_wait, !atomic_read(&j->ready));
        }

        ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
        if (ret) {
                bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
                j->ret = ret;
        }

        if (atomic_dec_and_test(&j->done)) {
                j->finish = sched_clock();
                complete(&j->done_completion);
        }

        return 0;
}

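/*
 * Entry point for both the unit tests and the perf tests above.  This is
 * normally driven from userspace through the filesystem's sysfs interface
 * (see the perf_test attribute in sysfs.c); the write is expected to look
 * roughly like
 *
 *   echo "rand_insert 1000000 4" > /sys/fs/bcachefs/<fs uuid>/perf_test
 *
 * i.e. test name, number of iterations, number of threads -- check sysfs.c
 * for the exact attribute name and argument format.
 */
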
int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
                         u64 nr, unsigned nr_threads)
{
        struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
        char name_buf[20];
        struct printbuf nr_buf = PRINTBUF;
        struct printbuf per_sec_buf = PRINTBUF;
        unsigned i;
        u64 time;

        atomic_set(&j.ready, nr_threads);
        init_waitqueue_head(&j.ready_wait);

        atomic_set(&j.done, nr_threads);
        init_completion(&j.done_completion);

#define perf_test(_test)                                \
        if (!strcmp(testname, #_test)) j.fn = _test

        perf_test(rand_insert);
        perf_test(rand_insert_multi);
        perf_test(rand_lookup);
        perf_test(rand_mixed);
        perf_test(rand_delete);

        perf_test(seq_insert);
        perf_test(seq_lookup);
        perf_test(seq_overwrite);
        perf_test(seq_delete);

        /* a unit test, not a perf test: */
        perf_test(test_delete);
        perf_test(test_delete_written);
        perf_test(test_iterate);
        perf_test(test_iterate_extents);
        perf_test(test_iterate_slots);
        perf_test(test_iterate_slots_extents);
        perf_test(test_peek_end);
        perf_test(test_peek_end_extents);

        perf_test(test_extent_overwrite_front);
        perf_test(test_extent_overwrite_back);
        perf_test(test_extent_overwrite_middle);
        perf_test(test_extent_overwrite_all);
        perf_test(test_extent_create_overlapping);

        perf_test(test_snapshots);

        if (!j.fn) {
                pr_err("unknown test %s", testname);
                return -EINVAL;
        }

        //pr_info("running test %s:", testname);

        if (nr_threads == 1)
                btree_perf_test_thread(&j);
        else
                for (i = 0; i < nr_threads; i++)
                        kthread_run(btree_perf_test_thread, &j,
                                    "bcachefs perf test[%u]", i);

        while (wait_for_completion_interruptible(&j.done_completion))
                ;

        time = j.finish - j.start;

        scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
        prt_human_readable_u64(&nr_buf, nr);
        prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
        printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
                name_buf, nr_buf.buf, nr_threads,
                div_u64(time, NSEC_PER_SEC),
                div_u64(time * nr_threads, nr),
                per_sec_buf.buf);
        printbuf_exit(&per_sec_buf);
        printbuf_exit(&nr_buf);
        return j.ret;
}

#endif /* CONFIG_BCACHEFS_TESTS */
