Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the OrangePi 5/5B/5+ boards. The file shown below is mm/list_lru.c, the generic LRU-list infrastructure used by shrinkers.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

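/*
 * Every list_lru is kept on the global list_lrus list so that
 * memcg_update_all_list_lrus() and memcg_drain_all_list_lrus() can walk
 * all of them; list_lrus_mutex serializes registration against those
 * walks.
 */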
static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

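/*
 * Resolve the list for a given memcg index: a negative index, or an LRU
 * that never grew a per-memcg array, falls back to the global per-node
 * list.
 */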
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

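/*
 * Look up the list an object belongs on from the object pointer itself:
 * the owning memcg is derived from the allocation and mapped to its slot
 * in the per-node array.
 */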
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

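/*
 * Add @item to the LRU it belongs to (the node is derived from the
 * item's backing page, the memcg from the allocation). If the item is
 * already on a list this is a no-op returning false, so callers may add
 * lazily without tracking membership themselves. Returns true if the
 * list was modified.
 */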
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

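/*
 * Remove @item from its LRU. Returns true if the item was on a list and
 * has been removed, false if it was already off-list.
 */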
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

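/*
 * The two isolate helpers below are intended to be called from a walk
 * callback, with the node's lock already held by __list_lru_walk_one().
 */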
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

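/*
 * Counting helpers: list_lru_count_one() reads one memcg's list length
 * under RCU (a snapshot, not a stable value); list_lru_count_node()
 * returns the per-node total.
 */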
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = READ_ONCE(l->nr_items);
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

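/*
 * Core of the walk API: iterate one list, invoking @isolate on each item
 * and acting on the returned lru_status. The callback may drop the node
 * lock (LRU_REMOVED_RETRY, LRU_RETRY), in which case the traversal
 * restarts from the head of the list.
 *
 * A sketch of an isolate callback (hypothetical demo_* names, not part
 * of this file) matching the list_lru_walk_cb signature; busy objects
 * are rotated to the tail, idle ones are isolated:
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct demo_obj *obj = container_of(item, struct demo_obj, lru);
 *
 *		if (demo_obj_busy(obj))
 *			return LRU_ROTATE;
 *		list_lru_isolate(list, item);
 *		return LRU_REMOVED;
 *	}
 */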
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

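/*
 * Walk one memcg's list on one node. The plain variant takes the node
 * lock with spin_lock(); the _irq variant is for LRUs whose node lock
 * must be acquired with interrupts disabled.
 */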
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

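/*
 * Walk everything on @nid: the global list first and then, for
 * memcg-aware LRUs, every per-memcg list, until *nr_to_walk is used up.
 */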
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

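/*
 * With CONFIG_MEMCG_KMEM, each node carries a struct list_lru_memcg: an
 * RCU-managed array of list_lru_one pointers indexed by kmemcg id (see
 * memcg_cache_id()). The helpers below allocate, grow and free that
 * array.
 */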
#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu_local().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu_local(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

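/*
 * Grow one node's per-memcg array from @old_size to @new_size entries:
 * allocate and populate a new array, copy the old pointers over, publish
 * it under the node lock, and free the old array only after a grace
 * period, since lockless readers may still be using it.
 */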
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu_local);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

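/*
 * Called by the memcg code when memcg_nr_cache_ids grows: resize the
 * per-memcg arrays of every registered LRU, rolling back all of them on
 * the first failure so the update is all-or-nothing.
 */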
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

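/*
 * Reparenting on memcg offlining: splice every list accounted to the
 * dying cgroup (@src_idx) onto the corresponding list of @dst_memcg, so
 * that no item is left on a list that nobody will ever shrink again.
 */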
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

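/*
 * Initialize @lru. Most callers go through the list_lru_init() or
 * list_lru_init_memcg() wrappers from <linux/list_lru.h> instead of
 * calling this directly.
 *
 * A sketch of typical usage (hypothetical demo_* names): pair a
 * memcg-aware LRU with a preallocated shrinker, so that shrinker->id is
 * valid by the time it is copied into lru->shrinker_id:
 *
 *	static struct list_lru demo_lru;
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *		.flags		= SHRINKER_MEMCG_AWARE,
 *	};
 *
 *	err = prealloc_shrinker(&demo_shrinker);
 *	if (!err)
 *		err = list_lru_init_memcg(&demo_lru, &demo_shrinker);
 */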
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

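/*
 * Tear down @lru: unregister it and free the per-memcg and per-node
 * arrays. Safe to call on an LRU that failed init or was already
 * destroyed, since lru->node is cleared.
 */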
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);