Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

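Below: mm/backing-dev.c, the backing_dev_info (writeback) implementation in this tree.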
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);
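/*
 * A reader therefore walks bdi_list under rcu_read_lock() rather than
 * bdi_lock. Illustrative reader pattern (as used by the writeback code):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		...;
 *	rcu_read_unlock();
 */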

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}
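/*
 * With debugfs mounted in the usual place, the file created above can be
 * read directly, e.g. (device name is an example):
 *
 *	cat /sys/kernel/debug/bdi/8:0/stats
 */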
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))
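/*
 * K() converts a page count to kilobytes. With 4 KiB pages
 * (PAGE_SHIFT == 12) the shift is 2, so K(pages) == pages * 4;
 * read_ahead_kb_store() above applies the inverse shift to go from
 * kilobytes back to pages.
 */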

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
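/*
 * For reference, the invocation above expands (roughly) to:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * which pairs with the read_ahead_kb_store() defined earlier to form a
 * read-write sysfs attribute.
 */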

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return snprintf(page, PAGE_SIZE-1, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
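/*
 * These attributes surface under /sys/class/bdi/<bdi>/, so e.g. the
 * readahead window of a disk can be tuned with (device name is an
 * example):
 *
 *	echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 */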

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
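/*
 * For example, with the default dirty_writeback_interval of 500
 * centisecs, the timer fires roughly five seconds from now
 * (500 * 10 == 5000 ms, converted to jiffies below).
 */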
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
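/*
 * INIT_BW is expressed in pages per second: 100 MiB/s is 100 << 20
 * bytes/s, and dividing by the page size (>> PAGE_SHIFT) gives
 * 100 << (20 - PAGE_SHIFT) pages/s, i.e. 25600 pages/s with 4 KiB pages.
 */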

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_bdi;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
 * bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(blkcg);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}
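/*
 * Illustrative caller pattern (the caller must already hold a reference
 * on @memcg_css, per the comment above):
 *
 *	wb = wb_get_lookup(bdi, memcg_css);
 *	if (wb) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 */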

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
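/*
 * Note the shape of the loop above: a failed lookup falls through to
 * cgwb_create(), and a creation race (-EEXIST, mapped to 0 in
 * cgwb_create()) simply retries the lookup. The loop only exits without
 * a wb when creation fails outright, e.g. with -ENOMEM.
 */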

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}
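/*
 * Clearing ->next is what makes the "prevent new wb's" comment work:
 * cgwb_create() refuses to link a new wb unless both
 * memcg_cgwb_list->next and blkcg_cgwb_list->next are non-NULL.
 */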

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);
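/*
 * Minimal usage sketch for a driver that needs its own bdi (hypothetical
 * caller, not part of this file):
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev%d", id);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */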
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	struct rb_node **p = &bdi_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	struct backing_dev_info *bdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	lockdep_assert_held(&bdi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		bdi = rb_entry(parent, struct backing_dev_info, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		if (bdi->id > id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		else if (bdi->id < id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	if (parentp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		*parentp = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * bdi_get_by_id - lookup and get bdi from its id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * @id: bdi id to lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * Find the bdi matching @id and take a reference to it.  Returns NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * if no matching bdi exists or it is already unregistered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) struct backing_dev_info *bdi_get_by_id(u64 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	struct backing_dev_info *bdi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	struct rb_node **p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	spin_lock_bh(&bdi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	p = bdi_lookup_rb_node(id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	if (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		bdi_get(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	spin_unlock_bh(&bdi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	return bdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) }
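
/*
 * Illustrative sketch, not part of this file: a caller resolves an id
 * back to a bdi and owns a reference on success (cf.
 * cgroup_writeback_by_id() in fs/fs-writeback.c in this kernel).
 * example_flush_by_id() is hypothetical.
 */
static void example_flush_by_id(u64 id)
{
	struct backing_dev_info *bdi = bdi_get_by_id(id);

	if (!bdi)
		return;		/* no such bdi, or already unregistered */

	/* ... safe to dereference bdi here ... */
	bdi_put(bdi);		/* drop the reference bdi_get_by_id() took */
}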
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
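/*
 * Register @bdi under a device name built from @fmt/@args: create the
 * sysfs device, register cgroup writeback and debugfs state, mark the
 * root wb registered, then assign a fresh id and insert the bdi into
 * bdi_tree and bdi_list under bdi_lock.
 */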
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	struct rb_node *parent, **p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (bdi->dev)	/* already registered; drivers need separate queues per device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (IS_ERR(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		return PTR_ERR(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	cgwb_bdi_register(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	bdi->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	bdi_debug_register(bdi, dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	set_bit(WB_registered, &bdi->wb.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	spin_lock_bh(&bdi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	bdi->id = ++bdi_id_cursor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	p = bdi_lookup_rb_node(bdi->id, &parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	rb_link_node(&bdi->rb_node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	rb_insert_color(&bdi->rb_node, &bdi_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	spin_unlock_bh(&bdi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	trace_writeback_bdi_register(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	ret = bdi_register_va(bdi, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) EXPORT_SYMBOL(bdi_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
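/*
 * Pin @owner so the owning device outlives this bdi; the reference is
 * dropped again in bdi_unregister().
 */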
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	WARN_ON_ONCE(bdi->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	bdi->owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	get_device(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  * Remove bdi from bdi_tree and bdi_list, and wait for RCU readers to drain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) static void bdi_remove_from_list(struct backing_dev_info *bdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	spin_lock_bh(&bdi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	rb_erase(&bdi->rb_node, &bdi_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	list_del_rcu(&bdi->bdi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	spin_unlock_bh(&bdi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	synchronize_rcu_expedited();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) }
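
/*
 * Illustrative sketch, not part of this file: the reader side that the
 * expedited grace period above waits for.  Walkers of bdi_list hold only
 * rcu_read_lock(), never bdi_lock (assumes <linux/rculist.h>).
 */
static void example_walk_bdis(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		/* only RCU-safe accesses; bdi may be mid-unregister */
	}
	rcu_read_unlock();
}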
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) void bdi_unregister(struct backing_dev_info *bdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	/* make sure nobody finds us on the bdi_list anymore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	bdi_remove_from_list(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	wb_shutdown(&bdi->wb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	cgwb_bdi_unregister(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	 * update the global bdi_min_ratio.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	if (bdi->min_ratio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		bdi_set_min_ratio(bdi, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (bdi->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		bdi_debug_unregister(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		device_unregister(bdi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		bdi->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (bdi->owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		put_device(bdi->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		bdi->owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) }
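
/*
 * Illustrative sketch, not part of this file: the usual teardown pairing
 * for a driver that registered its own bdi.  example_detach_bdi() is
 * hypothetical; release_bdi() below also unregisters as a safety net.
 */
static void example_detach_bdi(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);	/* stop writeback, remove sysfs/debugfs */
	bdi_put(bdi);		/* may free the bdi via release_bdi() */
}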
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
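/* kref release callback for bdi->refcnt, invoked from bdi_put() below. */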
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static void release_bdi(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	struct backing_dev_info *bdi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			container_of(ref, struct backing_dev_info, refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (test_bit(WB_registered, &bdi->wb.state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		bdi_unregister(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	WARN_ON_ONCE(bdi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	wb_exit(&bdi->wb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	kfree(bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) void bdi_put(struct backing_dev_info *bdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	kref_put(&bdi->refcnt, release_bdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) EXPORT_SYMBOL(bdi_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) const char *bdi_dev_name(struct backing_dev_info *bdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (!bdi || !bdi->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		return bdi_unknown_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	return bdi->dev_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) EXPORT_SYMBOL_GPL(bdi_dev_name);
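
/*
 * Illustrative sketch, not part of this file: bdi_dev_name() tolerates
 * NULL and unregistered bdis, so it is safe in diagnostics.
 * example_report() is hypothetical.
 */
static void example_report(struct backing_dev_info *bdi)
{
	pr_info("writeback device: %s\n", bdi_dev_name(bdi));
}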
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
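/*
 * Global congestion state: one waitqueue and one congested-bdi counter
 * per direction, indexed by BLK_RW_ASYNC (0) and BLK_RW_SYNC (1).
 */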
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static wait_queue_head_t congestion_wqh[2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static atomic_t nr_wb_congested[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	wait_queue_head_t *wqh = &congestion_wqh[sync];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	enum wb_congested_state bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	bit = sync ? WB_sync_congested : WB_async_congested;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (test_and_clear_bit(bit, &bdi->wb.congested))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		atomic_dec(&nr_wb_congested[sync]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (waitqueue_active(wqh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		wake_up(wqh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) EXPORT_SYMBOL(clear_bdi_congested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) void set_bdi_congested(struct backing_dev_info *bdi, int sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	enum wb_congested_state bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	bit = sync ? WB_sync_congested : WB_async_congested;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (!test_and_set_bit(bit, &bdi->wb.congested))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		atomic_inc(&nr_wb_congested[sync]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) EXPORT_SYMBOL(set_bdi_congested);
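
/*
 * Illustrative sketch, not part of this file: a legacy driver flips its
 * congestion state around internal watermarks.  The function name and
 * the watermark values are hypothetical.
 */
#define EXAMPLE_HIGH_WM	256
#define EXAMPLE_LOW_WM	32

static void example_update_congestion(struct backing_dev_info *bdi,
				      int nr_pending)
{
	if (nr_pending >= EXAMPLE_HIGH_WM)
		set_bdi_congested(bdi, BLK_RW_ASYNC);
	else if (nr_pending <= EXAMPLE_LOW_WM)
		clear_bdi_congested(bdi, BLK_RW_ASYNC);
}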
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  * congestion_wait - wait for a backing_dev to become uncongested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  * @sync: SYNC or ASYNC IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  * @timeout: timeout in jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  * exit write congestion.  If no backing_dev is congested, simply wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  * the next write to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) long congestion_wait(int sync, long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	unsigned long start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	wait_queue_head_t *wqh = &congestion_wqh[sync];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	ret = io_schedule_timeout(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	finish_wait(wqh, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 					jiffies_to_usecs(jiffies - start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) EXPORT_SYMBOL(congestion_wait);
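
/*
 * Illustrative sketch, not part of this file: the classic back-off loop
 * used by allocation and reclaim paths; HZ/50 is the conventional short
 * delay.  Assumes <linux/slab.h>; looping forever is for brevity only.
 */
static void *example_alloc_with_backoff(size_t size)
{
	void *p;

	while (!(p = kmalloc(size, GFP_NOWAIT)))
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
	return p;
}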
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * @sync: SYNC or ASYNC IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  * @timeout: timeout in jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  * If any backing_dev is congested, this waits for up to @timeout jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  * for either a BDI to exit congestion of the given @sync queue or for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  * write to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  * The return value is 0 if the sleep is for the full timeout. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  * it is the number of jiffies that were still remaining when the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  * returned. return_value == timeout implies the function did not sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) long wait_iff_congested(int sync, long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	unsigned long start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	wait_queue_head_t *wqh = &congestion_wqh[sync];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	 * If there is no congestion, yield if necessary instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	 * of sleeping on the congestion queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	if (atomic_read(&nr_wb_congested[sync]) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		/* In case we scheduled, work out time remaining */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		ret = timeout - (jiffies - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	/* Sleep until uncongested or a write happens */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	ret = io_schedule_timeout(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	finish_wait(wqh, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 					jiffies_to_usecs(jiffies - start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) EXPORT_SYMBOL(wait_iff_congested);
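
/*
 * Illustrative sketch, not part of this file: a reclaim-style caller
 * (cf. shrink_node() in mm/vmscan.c in this kernel).  With nothing
 * congested the call degrades to cond_resched() and returns with most
 * of the timeout left; a full sleep returns 0.
 */
static void example_throttle(void)
{
	long left = wait_iff_congested(BLK_RW_ASYNC, HZ / 10);

	if (left == HZ / 10)
		; /* did not sleep at all: nothing was congested */
}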