Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

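drivers/md/bcache/writeback.h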
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90

#define MAX_WRITEBACKS_IN_PASS  5
#define MAX_WRITESIZE_IN_PASS   5000	/* in 512-byte sectors */

#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

#define BCH_AUTO_GC_DIRTY_THRESHOLD	50

#define BCH_DIRTY_INIT_THRD_MAX	64
/*
 * 14 (16384ths) is chosen so that each backing device's share is a
 * reasonable fraction of the total, while the computation does not
 * blow up until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT   14

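/*
 * State for the threads that bch_sectors_dirty_init() starts to count
 * dirty sectors in parallel (at most BCH_DIRTY_INIT_THRD_MAX of them).
 */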
struct bch_dirty_init_state;
struct dirty_init_thrd_info {
	struct bch_dirty_init_state	*state;
	struct task_struct		*thread;
};

struct bch_dirty_init_state {
	struct cache_set		*c;
	struct bcache_device		*d;
	int				total_threads;
	int				key_idx;
	spinlock_t			idx_lock;
	atomic_t			started;
	atomic_t			enough;
	wait_queue_head_t		wait;
	struct dirty_init_thrd_info	infos[BCH_DIRTY_INIT_THRD_MAX];
};

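/* Sum the dirty-sector counters of every stripe on the device. */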
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

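/*
 * Map a sector offset on the device to its stripe index; returns
 * -EINVAL if the offset lies beyond the last stripe.
 */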
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
			offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * Here offset is definitely smaller than INT_MAX;
	 * returning it as an int will never overflow.
	 */
	return offset;
}

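/*
 * Return true if any stripe covered by the nr_sectors range starting
 * at offset has dirty sectors.
 */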
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

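/* Writeback cutoff thresholds, in percent of cache space in use. */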
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

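/*
 * Decide whether a write bio should go through writeback: require
 * writeback cache mode, no detach in progress, and cache usage at or
 * below the sync cutoff; never write back discards; prefer writeback
 * when partial stripes are expensive and the stripes touched by the
 * bio are already dirty; otherwise, unless the caller would bypass the
 * cache anyway, write back sync, metadata, and high-priority bios, or
 * any bio while cache usage is at or below the normal cutoff.
 */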
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

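/* Wake the writeback thread, if one is running. */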
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

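/*
 * On the backing device's first dirty write, persist BDEV_STATE_DIRTY
 * to its superblock and wake the writeback thread.
 */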
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

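/* Implemented in writeback.c. */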
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif /* _BCACHE_WRITEBACK_H */