/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);
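/*
 * Each DECLARE_EVENT_CLASS() above is only a template; the DEFINE_EVENT()
 * entries that follow instantiate named tracepoints from it, and the
 * tracepoint core generates a trace_<event>() helper with the TP_PROTO
 * signature.  A minimal sketch of how such an event would be emitted
 * (illustrative call, not copied from request.c):
 *
 *	trace_bcache_request_start(d, bio);
 *
 * where d is the bcache_device and bio is the bio being traced.
 */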
TP_printk("bucket %zu", __entry->bucket) 76 ); 76 ); 77 77 78 /* request.c */ 78 /* request.c */ 79 79 80 DEFINE_EVENT(bcache_request, bcache_request_st 80 DEFINE_EVENT(bcache_request, bcache_request_start, 81 TP_PROTO(struct bcache_device *d, stru 81 TP_PROTO(struct bcache_device *d, struct bio *bio), 82 TP_ARGS(d, bio) 82 TP_ARGS(d, bio) 83 ); 83 ); 84 84 85 DEFINE_EVENT(bcache_request, bcache_request_en 85 DEFINE_EVENT(bcache_request, bcache_request_end, 86 TP_PROTO(struct bcache_device *d, stru 86 TP_PROTO(struct bcache_device *d, struct bio *bio), 87 TP_ARGS(d, bio) 87 TP_ARGS(d, bio) 88 ); 88 ); 89 89 90 DECLARE_EVENT_CLASS(bcache_bio, 90 DECLARE_EVENT_CLASS(bcache_bio, 91 TP_PROTO(struct bio *bio), 91 TP_PROTO(struct bio *bio), 92 TP_ARGS(bio), 92 TP_ARGS(bio), 93 93 94 TP_STRUCT__entry( 94 TP_STRUCT__entry( 95 __field(dev_t, dev 95 __field(dev_t, dev ) 96 __field(sector_t, sector 96 __field(sector_t, sector ) 97 __field(unsigned int, nr_sec 97 __field(unsigned int, nr_sector ) 98 __array(char, rwbs, 98 __array(char, rwbs, 6 ) 99 ), 99 ), 100 100 101 TP_fast_assign( 101 TP_fast_assign( 102 __entry->dev = bio_ 102 __entry->dev = bio_dev(bio); 103 __entry->sector = bio- 103 __entry->sector = bio->bi_iter.bi_sector; 104 __entry->nr_sector = bio- 104 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 105 blk_fill_rwbs(__entry->rwbs, b 105 blk_fill_rwbs(__entry->rwbs, bio->bi_opf); 106 ), 106 ), 107 107 108 TP_printk("%d,%d %s %llu + %u", 108 TP_printk("%d,%d %s %llu + %u", 109 MAJOR(__entry->dev), MINOR(_ 109 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, 110 (unsigned long long)__entry- 110 (unsigned long long)__entry->sector, __entry->nr_sector) 111 ); 111 ); 112 112 113 DEFINE_EVENT(bcache_bio, bcache_bypass_sequent 113 DEFINE_EVENT(bcache_bio, bcache_bypass_sequential, 114 TP_PROTO(struct bio *bio), 114 TP_PROTO(struct bio *bio), 115 TP_ARGS(bio) 115 TP_ARGS(bio) 116 ); 116 ); 117 117 118 DEFINE_EVENT(bcache_bio, bcache_bypass_congest 118 DEFINE_EVENT(bcache_bio, bcache_bypass_congested, 119 TP_PROTO(struct bio *bio), 119 TP_PROTO(struct bio *bio), 120 TP_ARGS(bio) 120 TP_ARGS(bio) 121 ); 121 ); 122 122 123 TRACE_EVENT(bcache_read, 123 TRACE_EVENT(bcache_read, 124 TP_PROTO(struct bio *bio, bool hit, bo 124 TP_PROTO(struct bio *bio, bool hit, bool bypass), 125 TP_ARGS(bio, hit, bypass), 125 TP_ARGS(bio, hit, bypass), 126 126 127 TP_STRUCT__entry( 127 TP_STRUCT__entry( 128 __field(dev_t, dev 128 __field(dev_t, dev ) 129 __field(sector_t, sector 129 __field(sector_t, sector ) 130 __field(unsigned int, nr_sec 130 __field(unsigned int, nr_sector ) 131 __array(char, rwbs, 131 __array(char, rwbs, 6 ) 132 __field(bool, cache_ 132 __field(bool, cache_hit ) 133 __field(bool, bypass 133 __field(bool, bypass ) 134 ), 134 ), 135 135 136 TP_fast_assign( 136 TP_fast_assign( 137 __entry->dev = bio_ 137 __entry->dev = bio_dev(bio); 138 __entry->sector = bio- 138 __entry->sector = bio->bi_iter.bi_sector; 139 __entry->nr_sector = bio- 139 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 140 blk_fill_rwbs(__entry->rwbs, b 140 blk_fill_rwbs(__entry->rwbs, bio->bi_opf); 141 __entry->cache_hit = hit; 141 __entry->cache_hit = hit; 142 __entry->bypass = bypass; 142 __entry->bypass = bypass; 143 ), 143 ), 144 144 145 TP_printk("%d,%d %s %llu + %u hit %u 145 TP_printk("%d,%d %s %llu + %u hit %u bypass %u", 146 MAJOR(__entry->dev), MINOR(_ 146 MAJOR(__entry->dev), MINOR(__entry->dev), 147 __entry->rwbs, (unsigned lon 147 __entry->rwbs, (unsigned long long)__entry->sector, 148 __entry->nr_sector, __entry- 
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);
TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(u32,		nr_keys			)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		__entry->nr_keys	= keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u + %u",
		__entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level	= b->level;
		__entry->inode		= KEY_INODE(k);
		__entry->offset		= KEY_OFFSET(k);
		__entry->size		= KEY_SIZE(k);
		__entry->dirty		= KEY_DIRTY(k);
		__entry->op		= op;
		__entry->status		= status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);
keys %u", __entry->bucket, __entry->keys) 380 ); 380 ); 381 381 382 DEFINE_EVENT(btree_split, bcache_btree_node_sp 382 DEFINE_EVENT(btree_split, bcache_btree_node_split, 383 TP_PROTO(struct btree *b, unsigned key 383 TP_PROTO(struct btree *b, unsigned keys), 384 TP_ARGS(b, keys) 384 TP_ARGS(b, keys) 385 ); 385 ); 386 386 387 DEFINE_EVENT(btree_split, bcache_btree_node_co 387 DEFINE_EVENT(btree_split, bcache_btree_node_compact, 388 TP_PROTO(struct btree *b, unsigned key 388 TP_PROTO(struct btree *b, unsigned keys), 389 TP_ARGS(b, keys) 389 TP_ARGS(b, keys) 390 ); 390 ); 391 391 392 DEFINE_EVENT(btree_node, bcache_btree_set_root 392 DEFINE_EVENT(btree_node, bcache_btree_set_root, 393 TP_PROTO(struct btree *b), 393 TP_PROTO(struct btree *b), 394 TP_ARGS(b) 394 TP_ARGS(b) 395 ); 395 ); 396 396 397 TRACE_EVENT(bcache_keyscan, 397 TRACE_EVENT(bcache_keyscan, 398 TP_PROTO(unsigned nr_found, 398 TP_PROTO(unsigned nr_found, 399 unsigned start_inode, uint64_ 399 unsigned start_inode, uint64_t start_offset, 400 unsigned end_inode, uint64_t 400 unsigned end_inode, uint64_t end_offset), 401 TP_ARGS(nr_found, 401 TP_ARGS(nr_found, 402 start_inode, start_offset, 402 start_inode, start_offset, 403 end_inode, end_offset), 403 end_inode, end_offset), 404 404 405 TP_STRUCT__entry( 405 TP_STRUCT__entry( 406 __field(__u32, nr_found 406 __field(__u32, nr_found ) 407 __field(__u32, start_inode 407 __field(__u32, start_inode ) 408 __field(__u64, start_offset 408 __field(__u64, start_offset ) 409 __field(__u32, end_inode 409 __field(__u32, end_inode ) 410 __field(__u64, end_offset 410 __field(__u64, end_offset ) 411 ), 411 ), 412 412 413 TP_fast_assign( 413 TP_fast_assign( 414 __entry->nr_found = nr_f 414 __entry->nr_found = nr_found; 415 __entry->start_inode = star 415 __entry->start_inode = start_inode; 416 __entry->start_offset = star 416 __entry->start_offset = start_offset; 417 __entry->end_inode = end_ 417 __entry->end_inode = end_inode; 418 __entry->end_offset = end_ 418 __entry->end_offset = end_offset; 419 ), 419 ), 420 420 421 TP_printk("found %u keys from %u:%llu 421 TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found, 422 __entry->start_inode, __entr 422 __entry->start_inode, __entry->start_offset, 423 __entry->end_inode, __entry- 423 __entry->end_inode, __entry->end_offset) 424 ); 424 ); 425 425 426 /* Allocator */ 426 /* Allocator */ 427 427 428 TRACE_EVENT(bcache_invalidate, 428 TRACE_EVENT(bcache_invalidate, 429 TP_PROTO(struct cache *ca, size_t buck 429 TP_PROTO(struct cache *ca, size_t bucket), 430 TP_ARGS(ca, bucket), 430 TP_ARGS(ca, bucket), 431 431 432 TP_STRUCT__entry( 432 TP_STRUCT__entry( 433 __field(unsigned, sector 433 __field(unsigned, sectors ) 434 __field(dev_t, dev 434 __field(dev_t, dev ) 435 __field(__u64, offset 435 __field(__u64, offset ) 436 ), 436 ), 437 437 438 TP_fast_assign( 438 TP_fast_assign( 439 __entry->dev = ca-> 439 __entry->dev = ca->bdev->bd_dev; 440 __entry->offset = buck 440 __entry->offset = bucket << ca->set->bucket_bits; 441 __entry->sectors = GC_S 441 __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]); 442 ), 442 ), 443 443 444 TP_printk("invalidated %u sectors at % 444 TP_printk("invalidated %u sectors at %d,%d sector=%llu", 445 __entry->sectors, MAJOR(__en 445 __entry->sectors, MAJOR(__entry->dev), 446 MINOR(__entry->dev), __entry 446 MINOR(__entry->dev), __entry->offset) 447 ); 447 ); 448 448 449 TRACE_EVENT(bcache_alloc, 449 TRACE_EVENT(bcache_alloc, 450 TP_PROTO(struct cache *ca, size_t buck 450 TP_PROTO(struct cache *ca, size_t 
TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
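/*
 * Usage sketch, assuming tracefs is mounted at /sys/kernel/tracing (the
 * legacy /sys/kernel/debug/tracing path also works):
 *
 *	echo 1 > /sys/kernel/tracing/events/bcache/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * This enables every bcache event defined in this header and streams the
 * TP_printk() output of each hit tracepoint.
 */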