// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements the UBIFS shrinker, which evicts clean znodes from the
 * TNC tree when the Linux VM needs more RAM.
 *
 * We do not implement any LRU lists to find the oldest znodes to free because
 * that would add overhead to the file-system fast paths. So the shrinker just
 * walks the TNC tree when searching for znodes to free.
 *
 * If the root of a TNC sub-tree is clean and old enough, then the children are
 * also clean and old enough. So the shrinker walks the TNC in level order and
 * dumps entire sub-trees.
 *
 * The age of a znode is just the time-stamp of when it was last looked at.
 * The current shrinker first tries to evict old znodes, then young ones.
 *
 * Since the shrinker is global, it has to protect against races with FS
 * un-mounts, which is done by the 'ubifs_infos_lock' and 'c->umount_mutex'.
 */

#include "ubifs.h"

/* List of all UBIFS file-system instances */
LIST_HEAD(ubifs_infos);

/*
 * We number each shrinker run and record the number on the ubifs_info
 * structure so that we can easily work out which ubifs_info structures have
 * already been done by the current run.
 */
static unsigned int shrinker_run_no;

/* Protects 'ubifs_infos' list */
DEFINE_SPINLOCK(ubifs_infos_lock);

/* Global clean znode counter (for all mounted UBIFS instances) */
atomic_long_t ubifs_clean_zn_cnt;

/**
 * shrink_tnc - shrink TNC tree.
 * @c: UBIFS file-system description object
 * @nr: number of znodes to free
 * @age: the age of znodes to free
 * @contention: if any contention, this is set to %1
 *
 * This function traverses the TNC tree and frees clean znodes. It does not
 * free clean znodes which are younger than @age. Returns the number of freed
 * znodes.
 */
static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
{
	int total_freed = 0;
	struct ubifs_znode *znode, *zprev;
	time64_t time = ktime_get_seconds();

	ubifs_assert(c, mutex_is_locked(&c->umount_mutex));
	ubifs_assert(c, mutex_is_locked(&c->tnc_mutex));

	if (!c->zroot.znode || atomic_long_read(&c->clean_zn_cnt) == 0)
		return 0;

	/*
	 * Traverse the TNC tree in level order, so that it is possible to
	 * destroy large sub-trees. Indeed, if a znode is old, then all its
	 * children are older or of the same age.
	 *
	 * Note, we are holding 'c->tnc_mutex', so we do not have to lock the
	 * 'c->space_lock' when _reading_ 'c->clean_zn_cnt', because it is
	 * changed only when the 'c->tnc_mutex' is held.
	 */
	zprev = NULL;
	znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
	while (znode && total_freed < nr &&
	       atomic_long_read(&c->clean_zn_cnt) > 0) {
		int freed;

		/*
		 * If the znode is clean but it is in the 'c->cnext' list, this
		 * means that it has just been written to flash as part of the
		 * commit and was marked clean. Such znodes will be removed
		 * from the list at the end of the commit. We cannot change the
		 * list, because it is not protected by any mutex (a design
		 * decision to keep the commit independent of, and parallel to,
		 * main I/O). So we just skip these znodes.
		 *
		 * Note, the 'clean_zn_cnt' counters are not updated until
		 * after the commit, so the UBIFS shrinker does not report
		 * the znodes which are in the 'c->cnext' list as freeable.
		 *
		 * Also note, if the root of a sub-tree is not in 'c->cnext',
		 * then the whole sub-tree is not in 'c->cnext' either, so it
		 * is safe to dump the whole sub-tree.
		 */

		if (znode->cnext) {
			/*
			 * Very soon these znodes will be removed from the list
			 * and become freeable.
			 */
			*contention = 1;
		} else if (!ubifs_zn_dirty(znode) &&
			   abs(time - znode->time) >= age) {
			if (znode->parent)
				znode->parent->zbranch[znode->iip].znode = NULL;
			else
				c->zroot.znode = NULL;

			freed = ubifs_destroy_tnc_subtree(c, znode);
			atomic_long_sub(freed, &ubifs_clean_zn_cnt);
			atomic_long_sub(freed, &c->clean_zn_cnt);
			total_freed += freed;
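			/*
			 * The znode we were standing on has just been freed,
			 * so resume the level-order walk from the previous
			 * znode.
			 */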
			znode = zprev;
		}

		if (unlikely(!c->zroot.znode))
			break;

		zprev = znode;
		znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
		cond_resched();
	}

	return total_freed;
}
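
/*
 * Purely illustrative, self-contained sketch of the eviction idea used above:
 * walk a tree in level order and, whenever a sub-tree root is clean and old
 * enough, detach and free the whole sub-tree. This is NOT UBIFS code and is
 * never built; the toy structure merely assumes the same invariants as the
 * TNC: a clean node never has dirty children, and children are never younger
 * than their parent.
 */
#if 0
#include <stdlib.h>
#include <time.h>

#define TOY_FANOUT	4
#define TOY_QUEUE_LEN	64

struct toy_node {
	int dirty;
	time_t timestamp;			/* last time it was looked at */
	struct toy_node *child[TOY_FANOUT];
};

/* Recursively free a sub-tree and return the number of freed nodes. */
static int toy_free_subtree(struct toy_node *node)
{
	int i, freed = 1;

	for (i = 0; i < TOY_FANOUT; i++)
		if (node->child[i])
			freed += toy_free_subtree(node->child[i]);
	free(node);
	return freed;
}

/* Level-order scan: drop clean sub-trees older than @age seconds. */
static int toy_evict_old(struct toy_node **rootp, int age)
{
	struct toy_node *queue[TOY_QUEUE_LEN];
	int head = 0, tail = 0, freed = 0;
	time_t now = time(NULL);

	if (!*rootp)
		return 0;
	if (!(*rootp)->dirty && now - (*rootp)->timestamp >= age) {
		freed = toy_free_subtree(*rootp);
		*rootp = NULL;
		return freed;
	}
	queue[tail++] = *rootp;
	while (head < tail) {
		struct toy_node *node = queue[head++];
		int i;

		for (i = 0; i < TOY_FANOUT; i++) {
			struct toy_node *child = node->child[i];

			if (!child)
				continue;
			if (!child->dirty && now - child->timestamp >= age) {
				/* Clean and old: the whole sub-tree goes. */
				node->child[i] = NULL;
				freed += toy_free_subtree(child);
			} else if (tail < TOY_QUEUE_LEN) {
				/* Bounded toy queue: overflow is not scanned. */
				queue[tail++] = child;
			}
		}
	}
	return freed;
}
#endif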

/**
 * shrink_tnc_trees - shrink UBIFS TNC trees.
 * @nr: number of znodes to free
 * @age: the age of znodes to free
 * @contention: if any contention, this is set to %1
 *
 * This function walks the list of mounted UBIFS file-systems and frees clean
 * znodes which are older than @age, until at least @nr znodes are freed.
 * Returns the number of freed znodes.
 */
static int shrink_tnc_trees(int nr, int age, int *contention)
{
	struct ubifs_info *c;
	struct list_head *p;
	unsigned int run_no;
	int freed = 0;

	spin_lock(&ubifs_infos_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
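	/*
	 * Note: run number 0 is deliberately skipped so that a freshly
	 * mounted file-system, whose 'shrinker_run_no' is presumably still
	 * zero, is never mistaken for one already processed in this run.
	 */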
	/* Iterate over all mounted UBIFS file-systems and try to shrink them */
	p = ubifs_infos.next;
	while (p != &ubifs_infos) {
		c = list_entry(p, struct ubifs_info, infos_list);
		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (c->shrinker_run_no == run_no)
			break;
		if (!mutex_trylock(&c->umount_mutex)) {
			/* Some un-mount is in progress, try next FS */
			*contention = 1;
			p = p->next;
			continue;
		}
		/*
		 * We're holding 'c->umount_mutex', so the file-system won't go
		 * away.
		 */
		if (!mutex_trylock(&c->tnc_mutex)) {
			mutex_unlock(&c->umount_mutex);
			*contention = 1;
			p = p->next;
			continue;
		}
		spin_unlock(&ubifs_infos_lock);
		/*
		 * OK, now that the TNC is locked the file-system cannot go
		 * away, so it is safe to reap the cache.
		 */
		c->shrinker_run_no = run_no;
		freed += shrink_tnc(c, nr, age, contention);
		mutex_unlock(&c->tnc_mutex);
		spin_lock(&ubifs_infos_lock);
		/* Get the next list element before we move this one */
		p = p->next;
		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&c->infos_list, &ubifs_infos);
		mutex_unlock(&c->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&ubifs_infos_lock);
	return freed;
}

/**
 * kick_a_thread - kick a background thread to start commit.
 *
 * This function kicks a background thread to start background commit. Returns
 * %-1 if a thread was kicked or there is another reason to assume the memory
 * will soon be freed or become freeable. If there are no dirty znodes, returns
 * %0.
 */
static int kick_a_thread(void)
{
	int i;
	struct ubifs_info *c;

	/*
	 * Iterate over all mounted UBIFS file-systems and find out if there is
	 * already an ongoing commit operation there. If not, then iterate a
	 * second time and initiate a background commit.
	 */
	spin_lock(&ubifs_infos_lock);
	for (i = 0; i < 2; i++) {
		list_for_each_entry(c, &ubifs_infos, infos_list) {
			long dirty_zn_cnt;

			if (!mutex_trylock(&c->umount_mutex)) {
				/*
				 * Some un-mount is in progress; it will
				 * certainly free memory, so just return.
				 */
				spin_unlock(&ubifs_infos_lock);
				return -1;
			}

			dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt);

			if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN ||
			    c->ro_mount || c->ro_error) {
				mutex_unlock(&c->umount_mutex);
				continue;
			}

			if (c->cmt_state != COMMIT_RESTING) {
				spin_unlock(&ubifs_infos_lock);
				mutex_unlock(&c->umount_mutex);
				return -1;
			}

			if (i == 1) {
				list_move_tail(&c->infos_list, &ubifs_infos);
				spin_unlock(&ubifs_infos_lock);

				ubifs_request_bg_commit(c);
				mutex_unlock(&c->umount_mutex);
				return -1;
			}
			mutex_unlock(&c->umount_mutex);
		}
	}
	spin_unlock(&ubifs_infos_lock);

	return 0;
}

unsigned long ubifs_shrink_count(struct shrinker *shrink,
				 struct shrink_control *sc)
{
	long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);

	/*
	 * Due to the way UBIFS updates the clean znode counter it may
	 * temporarily be negative.
	 */
	return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
}

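/*
 * Note: as with other shrinkers, ->count_objects() above only tells the VM
 * how many objects could be freed; the VM then decides how much to actually
 * scan and passes that in 'sc->nr_to_scan' to ->scan_objects() below.
 */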
unsigned long ubifs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	int contention = 0;
	unsigned long freed;
	long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);

	if (!clean_zn_cnt) {
		/*
		 * No clean znodes, nothing to reap. All we can do in this case
		 * is to kick background threads to start a commit, which will
		 * probably make clean znodes which, in turn, will be freeable.
		 * And we return -1, which makes the VM call us again later.
		 */
		dbg_tnc("no clean znodes, kick a thread");
		return kick_a_thread();
	}

	freed = shrink_tnc_trees(nr, OLD_ZNODE_AGE, &contention);
	if (freed >= nr)
		goto out;

	dbg_tnc("not enough old znodes, try to free young ones");
	freed += shrink_tnc_trees(nr - freed, YOUNG_ZNODE_AGE, &contention);
	if (freed >= nr)
		goto out;

	dbg_tnc("not enough young znodes, free all");
	freed += shrink_tnc_trees(nr - freed, 0, &contention);

	if (!freed && contention) {
		dbg_tnc("freed nothing, but contention");
		return SHRINK_STOP;
	}

out:
	dbg_tnc("%lu znodes were freed, requested %lu", freed, nr);
	return freed;
}
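
/*
 * For reference, a minimal sketch of how the two callbacks above are
 * typically wired to the memory-management shrinker machinery. This is an
 * illustration only, never built: the real registration lives elsewhere (in
 * UBIFS it is done in super.c), the classic 'struct shrinker' +
 * register_shrinker() interface is assumed (pre-6.7 kernels), and the
 * example_init()/example_exit() names are placeholders.
 */
#if 0
static struct shrinker ubifs_shrinker_info = {
	.scan_objects	= ubifs_shrink_scan,
	.count_objects	= ubifs_shrink_count,
	.seeks		= DEFAULT_SEEKS,
};

static int __init example_init(void)
{
	/* The second argument names the shrinker for debugfs/tracing. */
	return register_shrinker(&ubifs_shrinker_info, "ubifs-slab");
}

static void __exit example_exit(void)
{
	unregister_shrinker(&ubifs_shrinker_info);
}
#endif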