/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

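/*
 * Events built on the bcache_request class record a bio seen by a bcache
 * device: the bio's device and start sector, the bcache device's own
 * major/first minor (printed as the "from" device), the start sector with
 * the 16-sector data offset subtracted, the size in sectors and the
 * decoded op flags.
 */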
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, orig_major)
		__field(unsigned int, orig_minor)
		__field(sector_t, sector)
		__field(sector_t, orig_sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->orig_major = d->disk->major;
		__entry->orig_minor = d->disk->first_minor;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->orig_sector = bio->bi_iter.bi_sector - 16;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

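/*
 * Events built on the bkey class dump a single key: the inode it belongs
 * to, its offset, its size in sectors and whether it is marked dirty.
 */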
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32, size)
		__field(u32, inode)
		__field(u64, offset)
		__field(bool, dirty)
	),

	TP_fast_assign(
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

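/*
 * Events built on the btree_node class identify a btree node by the cache
 * bucket its first pointer points into.
 */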
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

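/*
 * Fired when a request enters and completes on a bcache device; the
 * generated helpers are called from the submission and completion paths
 * in request.c, e.g. trace_bcache_request_start(d, bio) (call sites shown
 * here for illustration only).
 */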
DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

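/*
 * Events built on the bcache_bio class record just the bio: device, start
 * sector, size in sectors and decoded op flags.
 */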
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

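/*
 * Traces the outcome of a read: whether it hit the cache and whether it
 * bypassed the cache entirely.
 */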
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, cache_hit)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

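/*
 * Traces a write into a cache set: the set UUID, the target inode, the
 * bio, and whether the write is handled as writeback or bypasses the
 * cache.
 */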
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
		__field(u64, inode)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, writeback)
		__field(bool, bypass)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
		__entry->inode = inode;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

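/*
 * Events built on the cache_set class identify the cache set only by its
 * UUID.
 */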
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(u32, nr_keys)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		__entry->nr_keys = keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

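/*
 * Records a btree node write: the node's bucket, how many blocks of the
 * node were already written, and the number of keys in the set being
 * written out.
 */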
TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, block)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block = b->written;
		__entry->keys = b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u + %u",
		  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned, nodes)
	),

	TP_fast_assign(
		__entry->nodes = nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

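/*
 * Records an individual key insertion into a btree node: the node's
 * bucket and level, the key itself, the originating operation and the
 * resulting insert status.
 */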
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64, btree_node)
		__field(u32, btree_level)
		__field(u32, inode)
		__field(u64, offset)
		__field(u32, size)
		__field(u8, dirty)
		__field(u8, op)
		__field(u8, status)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

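/*
 * Events built on the btree_split class record the node being split or
 * compacted together with the number of keys involved.
 */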
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys = keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

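/*
 * Records one pass of a key scan: how many keys were found and the
 * inode:offset range that was covered.
 */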
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32, nr_found)
		__field(__u32, start_inode)
		__field(__u64, start_offset)
		__field(__u32, end_inode)
		__field(__u64, end_offset)
	),

	TP_fast_assign(
		__entry->nr_found = nr_found;
		__entry->start_inode = start_inode;
		__entry->start_offset = start_offset;
		__entry->end_inode = end_inode;
		__entry->end_offset = end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

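/*
 * Records a bucket being invalidated for reuse: the cache device, the
 * bucket's sector offset and how many sectors of cached data it still
 * held according to the GC accounting.
 */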
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned, sectors)
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
		__entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

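/*
 * Records a failed bucket allocation together with the occupancy of the
 * relevant free and free_inc fifos and the cache set's prio_blocked
 * count.
 */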
TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, blocked)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->free = fifo_used(&ca->free[reserve]);
		__entry->free_inc = fifo_used(&ca->free_inc);
		__entry->blocked = atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

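/*
 * bcache_writeback fires for each dirty key picked up by background
 * writeback; bcache_writeback_collision fires when the key was changed
 * underneath the writeback and the insert is skipped (semantics inferred
 * from the writeback path).
 */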
DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>