Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

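The file shown below is the f2fs shrinker (fs/f2fs/shrinker.c in the kernel tree). It implements the counting and scanning callbacks that let the kernel's memory-reclaim path trim f2fs caches: extent cache nodes, clean NAT entries, and surplus free nids.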
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

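/*
 * Every mounted f2fs superblock is linked onto f2fs_list by
 * f2fs_join_shrinker() and walked under f2fs_list_lock.  shrinker_run_no
 * tags each scan pass so that a superblock is processed at most once per
 * call to f2fs_shrink_scan().
 */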
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}

static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}

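/*
 * Counting side of the shrinker: report how many objects could be
 * reclaimed across all mounted f2fs instances.  The total covers zombie
 * extent trees plus extent cache nodes, reclaimable NAT entries, and
 * free nids beyond MAX_FREE_NIDS.  Superblocks whose umount_mutex cannot
 * be taken are in the middle of f2fs_put_super() and are skipped.
 */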
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

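/*
 * Scanning side of the shrinker: try to free up to sc->nr_to_scan
 * objects.  Each pass takes a fresh non-zero run number so a superblock
 * is shrunk at most once per pass.  Half of the budget goes to the
 * extent cache first, then clean NAT entries, then free nids, and each
 * processed superblock is rotated to the tail of f2fs_list so reclaim
 * pressure is spread across mounts.
 */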
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

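/*
 * f2fs_join_shrinker() makes a superblock visible to the shrinker;
 * f2fs_leave_shrinker() drops whatever is left of its extent cache and
 * unlinks it again.  They bracket the lifetime of a mount, being called
 * from the mount and unmount paths respectively.
 */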
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}
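For context, here is a minimal sketch of how a count/scan pair like this is typically registered with the Linux 5.10 shrinker API. The struct shrinker fields, register_shrinker()/unregister_shrinker(), and DEFAULT_SEEKS are the real 5.10 interface; the example_* names and the module init/exit placement are illustrative assumptions, since the actual f2fs registration is not part of this file.

#include <linux/module.h>
#include <linux/shrinker.h>
#include "f2fs.h"	/* assumed to declare f2fs_shrink_count/f2fs_shrink_scan */

/* Hypothetical wiring: identifiers prefixed example_ are not from the f2fs sources. */
static struct shrinker example_f2fs_shrinker = {
	.count_objects	= f2fs_shrink_count,	/* estimate freeable objects */
	.scan_objects	= f2fs_shrink_scan,	/* free up to sc->nr_to_scan objects */
	.seeks		= DEFAULT_SEEKS,
};

static int __init example_init(void)
{
	/* In 5.10, register_shrinker() takes the shrinker and returns 0 on success. */
	return register_shrinker(&example_f2fs_shrinker);
}

static void __exit example_exit(void)
{
	unregister_shrinker(&example_f2fs_shrinker);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");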