^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Implement the manual drop-all-pagecache function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/sysctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) /* A global variable is a bit ugly, but it keeps the code simple */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) int sysctl_drop_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
/*
 * Drop clean pagecache pages for every inode on @sb.
 *
 * Intended as the iterate_supers() callback for the drop_caches sysctl;
 * @unused is the callback cookie and is ignored.
 *
 * Walks sb->s_inodes under s_inode_list_lock, pinning each candidate
 * inode so the list locks can be dropped while its mapping is invalidated.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Pin the inode so it stays valid once both locks are dropped. */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		/* Toss the whole mapping range (clean pages only). */
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/*
		 * Release the previously-held inode only now, with no
		 * spinlocks held, and keep the current one referenced
		 * until the next iteration so our position in s_inodes
		 * stays anchored while the list lock is dropped.
		 */
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	/* Drop the reference pinned by the final iteration, if any. */
	iput(toput_inode);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) int drop_caches_sysctl_handler(struct ctl_table *table, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) void *buffer, size_t *length, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) if (write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static int stfu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) if (sysctl_drop_caches & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) iterate_supers(drop_pagecache_sb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) count_vm_event(DROP_PAGECACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) if (sysctl_drop_caches & 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) drop_slab();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) count_vm_event(DROP_SLAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) if (!stfu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) pr_info("%s (%d): drop_caches: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) current->comm, task_pid_nr(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) sysctl_drop_caches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) stfu |= sysctl_drop_caches & 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) }