// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which were used more in the past, but hopefully are
 * used less frequently now than the thrashing set:
 *
 * +-memory available to cache-+
 * |                           |
 * +-inactive------+-active----+
 *   a b | c d e f g h i | J K L M N |
 * +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
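 *
 * Worked example (numbers are illustrative only): say the inactive
 * list holds NR_inactive = 1,000 pages and the active list holds
 * NR_active = 3,000 pages. A page is evicted when the age counter
 * reads E = 5,000 and refaults when it reads R = 7,000, giving a
 * refault distance of R - E = 2,000. Since 2,000 <= NR_active, the
 * page could have stayed resident had the cache been used optimally,
 * and is therefore activated on refault.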
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each lruvec (a memcg's per-node LRU lists), a counter for
 * inactive evictions and activations is maintained
 * (lruvec->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the memcg and the node) is stored in the now empty page
 * cache slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;
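
/*
 * Illustration (hypothetical numbers): if the xarray entry leaves 36
 * timestamp bits but the machine has 2^38 pages, workingset_init()
 * below sets bucket_order = 38 - 36 = 2, so evictions are grouped
 * into buckets of 2^2 = 4 ticks of the nonresident age counter.
 */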

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}
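
/*
 * Resulting layout of the shadow entry, from high to low bits, as
 * derived from pack_shadow() above (xa_mk_value() additionally tags
 * the word as a value entry in the xarray):
 *
 *	[ eviction timestamp | memcg ID | node ID    | workingset ]
 *	     (bucketed)       (16 bits)  (NODES_SHIFT)   (1 bit)
 */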

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}
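
/*
 * Illustration (hypothetical hierarchy): with cgroups root -> parent
 * -> leaf, a call to workingset_age_nonresident(leaf_lruvec, 32) adds
 * 32 to the nonresident_age of the leaf, the parent and the root
 * lruvecs alike, by walking up via parent_lruvec() above.
 */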

/**
 * workingset_eviction - note the eviction of a page from memory
 * @page: the page being evicted
 * @target_memcg: the cgroup that is causing the reclaim
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}
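
/*
 * Sketch of the shadow entry lifecycle, for orientation only (the
 * actual callers live in the reclaim and page cache paths, e.g.
 * __remove_mapping() stores the returned entry in the mapping's
 * xarray slot and the fault path hands it back in):
 *
 *	shadow = workingset_eviction(page, target_memcg);
 *	// shadow is stored in the evicted page's slot
 *	...
 *	// later, when the same offset is faulted back in:
 *	workingset_refault(new_page, shadow);
 */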

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	bool file = page_is_file_lru(page);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
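
	/*
	 * Wraparound illustration (numbers invented, bucket_order
	 * ignored): if the counter stood at ULONG_MAX - 10 at eviction
	 * and reads 20 at refault, the unsigned subtraction yields
	 * 20 - (ULONG_MAX - 10) == 31 modulo the word size, i.e. the
	 * true distance despite the overflow in between.
	 */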

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
							     NR_INACTIVE_ANON);
		}
	}
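
	/*
	 * Summary of the accumulation above: every refault competes
	 * with NR_ACTIVE_FILE; an anon refault additionally competes
	 * with NR_INACTIVE_FILE; and when swap is available, anon
	 * pages join the competition, so NR_ACTIVE_ANON is added, plus
	 * NR_INACTIVE_ANON for file refaults.
	 */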
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		spin_lock_irq(&page_pgdat(page)->lru_lock);
		lru_note_cost_page(page);
		spin_unlock_irq(&page_pgdat(page)->lru_lock);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_slab_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
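
	/*
	 * Worked example (illustrative): a node with 4G of memory has
	 * ~2^20 4K pages, so max_nodes = 2^20 >> 3 = 131072 nodes. At
	 * roughly PAGE_SIZE/7 bytes per xa_node, that caps shadow
	 * nodes at ~73MB, i.e. the ~1.8% mentioned above.
	 */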

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
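
	/*
	 * Example with plausible defaults (not guaranteed for every
	 * config): on 64-bit with NODES_SHIFT=6 and
	 * MEM_CGROUP_ID_SHIFT=16, EVICTION_SHIFT = 1 + 1 + 6 + 16 = 24
	 * and timestamp_bits = 40; since 2^40 4K pages is 4PB, every
	 * realistic max_order fits and bucket_order stays 0.
	 */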
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);