// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents. Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time". The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree. (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */

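/*
 * In outline - a simplified sketch of the steps above, not the exact
 * control flow (see cmp_and_merge_page() further down in this file) -
 * each candidate page that ksmd scans is handled roughly as:
 *
 *	if (stable_tree_search(page) finds a match)
 *		merge page into that write-protected KSM page
 *	else if (page's checksum is unchanged since the previous scan &&
 *		 unstable_tree_search_insert(page) finds a match)
 *		merge the two pages and promote the result into the stable tree
 *	else
 *		record the new checksum and revisit the page on a later pass
 */
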
/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
#define KSM_FLAG_MASK	(SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
				/* to mask all the flags */
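
/*
 * A worked example of the flag encoding above (the unstable-tree setter
 * lives further down in this file): an rmap_item inserted into the
 * unstable tree during full scan number 0x1b3 stores in ->address
 *
 *	(addr & PAGE_MASK) | UNSTABLE_FLAG | (0x1b3 & SEQNR_MASK)
 *
 * i.e. the page-aligned address, the unstable bit, and the low seqnr byte
 * 0xb3; clearing the low bits with rmap_item->address &= PAGE_MASK (as
 * remove_node_from_stable_tree() does for stable items) recovers the bare
 * address.
 */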

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
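
/*
 * Note that STABLE_NODE_DUP_HEAD is a sentinel, not a usable list head:
 * it is the address of migrate_nodes.prev, which lies inside the
 * migrate_nodes list_head and so differs both from &migrate_nodes itself
 * and from the address of any real node; dup->head == STABLE_NODE_DUP_HEAD
 * thus unambiguously marks a stable_node as a "dup" (see
 * is_stable_node_dup() and the BUILD_BUG_ONs in
 * remove_node_from_stable_tree()).
 */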

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
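
/*
 * For example, KSM_KMEM_CACHE(rmap_item, 0) below expands to
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL)
 *
 * giving each cache a "ksm_"-prefixed name derived from its struct.
 */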

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}
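
/*
 * To summarize how the three roles sharing struct stable_node are told
 * apart: rmap_hlist_len == STABLE_NODE_CHAIN marks a chain; otherwise
 * head == STABLE_NODE_DUP_HEAD marks a dup hanging off a chain's hlist;
 * a node matching neither test is a regular rb-tree node whose hlist
 * holds its rmap_items directly.
 */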

static inline void stable_node_chain_add_dup(struct stable_node *dup,
					     struct stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}
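
/*
 * Both helpers above key mm_slots_hash on the bare mm pointer value (the
 * cast to unsigned long is just how the hashtable takes its key), so
 * lookup and insertion always use the same mm_struct address.
 */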

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 *
 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'. We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	vm_fault_t ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr,
				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma, addr,
					      FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
					      NULL);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course. The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting right index into array of tree roots.
 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
 * every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
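
/*
 * So with the default merge_across_nodes == 1 every kpfn maps to index 0,
 * and only the one_stable_tree/one_unstable_tree roots above are used;
 * with it unset on a CONFIG_NUMA build, a KSM page resident on node 2 is
 * filed under root_stable_tree[2].
 */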

static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
						   struct rb_root *root)
{
	struct stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined(CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE;	/* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup. The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes. This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
	 */
#if defined(GCC_VERSION) && GCC_VERSION >= 40903
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
#endif

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}

enum get_ksm_page_flags {
	GET_KSM_PAGE_NOLOCK,
	GET_KSM_PAGE_LOCK,
	GET_KSM_PAGE_TRYLOCK
};
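
/*
 * The three modes, as implemented in get_ksm_page() below: NOLOCK returns
 * the page with just a reference held; LOCK also takes the page lock;
 * TRYLOCK attempts the lock without sleeping and returns ERR_PTR(-EBUSY)
 * if the page is locked by somebody else.
 */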

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive. So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node. This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page with zero refcount may still show such a mapping while it
 * is on its way to being freed; but it is an anomaly to bear in mind.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) static struct page *get_ksm_page(struct stable_node *stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) enum get_ksm_page_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) void *expected_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) unsigned long kpfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) expected_mapping = (void *)((unsigned long)stable_node |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) PAGE_MAPPING_KSM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) page = pfn_to_page(kpfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (READ_ONCE(page->mapping) != expected_mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) goto stale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * We cannot do anything with the page while its refcount is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * Usually 0 means free, or tail of a higher-order page: in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * case this node is no longer referenced, and should be freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * however, it might mean that the page is under page_ref_freeze().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * The __remove_mapping() case is easy, again the node is now stale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * the same is in reuse_ksm_page() case; but if page is swapcache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * in migrate_page_move_mapping(), it might still be our page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * in which case it's essential to keep the node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) while (!get_page_unless_zero(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * Another check for page->mapping != expected_mapping would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * work here too. We have chosen the !PageSwapCache test to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * optimize the common case, when the page is or is about to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * be freed: PageSwapCache is cleared (under spin_lock_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * in the ref_freeze section of __remove_mapping(); but Anon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * page->mapping reset to NULL later, in free_pages_prepare().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (!PageSwapCache(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) goto stale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (READ_ONCE(page->mapping) != expected_mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) goto stale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (flags == GET_KSM_PAGE_TRYLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (!trylock_page(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
	} else if (flags == GET_KSM_PAGE_LOCK) {
		lock_page(page);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (flags != GET_KSM_PAGE_NOLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (READ_ONCE(page->mapping) != expected_mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) goto stale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) stale:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * We come here from above when page->mapping or !PageSwapCache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * suggests that the node is stale; but it might be under migration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * before checking whether node->kpfn has been changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (READ_ONCE(stable_node->kpfn) != kpfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) remove_node_from_stable_tree(stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
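
/*
 * A sketch of the pattern callers of get_ksm_page() follow (compare
 * remove_rmap_item_from_tree() below); the surrounding logic here is
 * illustrative only:
 *
 *	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
 *	if (!page)
 *		return;		(node was stale and has been removed)
 *	(use the ksm page: it is locked and we hold a reference)
 *	unlock_page(page);
 *	put_page(page);
 *
 * With GET_KSM_PAGE_TRYLOCK the call can instead return ERR_PTR(-EBUSY),
 * so those callers must check IS_ERR() as well as NULL.
 */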
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /*
 * Remove an rmap_item from the stable or unstable tree, cleaning up its
 * tree linkage and updating the ksm page counts accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (rmap_item->address & STABLE_FLAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct stable_node *stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) stable_node = rmap_item->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) hlist_del(&rmap_item->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!hlist_empty(&stable_node->hlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ksm_pages_sharing--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ksm_pages_shared--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) stable_node->rmap_hlist_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) put_anon_vma(rmap_item->anon_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) rmap_item->head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) rmap_item->address &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) } else if (rmap_item->address & UNSTABLE_FLAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) unsigned char age;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * Usually ksmd can and must skip the rb_erase, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * root_unstable_tree was already reset to RB_ROOT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * But be careful when an mm is exiting: do the rb_erase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * if this rmap_item was inserted by this scan, rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * than left over from before.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) BUG_ON(age > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!age)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) rb_erase(&rmap_item->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) root_unstable_tree + NUMA(rmap_item->nid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ksm_pages_unshared--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) rmap_item->address &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) cond_resched(); /* we're called from many long loops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct rmap_item **rmap_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) while (*rmap_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct rmap_item *rmap_item = *rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) *rmap_list = rmap_item->rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) remove_rmap_item_from_tree(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) free_rmap_item(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * Though it's very tempting to unmerge rmap_items from stable tree rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * than check every pte of a given vma, the locking doesn't quite work for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * that - an rmap_item is assigned to the stable tree after inserting ksm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * rmap_items from parent to child at fork time (so as not to waste time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * if exit comes before the next scan reaches it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * Similarly, although we'd like to remove rmap_items (so updating counts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * and freeing memory) when unmerging an area, it's easier to leave that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * to the next pass of ksmd - consider, for example, how ksmd might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * in cmp_and_merge_page on one of the rmap_items we would be removing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static int unmerge_ksm_pages(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (ksm_test_exit(vma->vm_mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) err = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) err = break_ksm(vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
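
/*
 * Note: break_ksm() (defined earlier in this file) breaks the sharing at
 * one address by faulting in a fresh, private copy-on-write copy, so a
 * successful pass over [start, end) leaves no ksm page mapped anywhere
 * in that range.
 */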
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) static inline struct stable_node *page_stable_node(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return PageKsm(page) ? page_rmapping(page) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static inline void set_page_stable_node(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct stable_node *stable_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
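
/*
 * A minimal sketch, for clarity only, of the encoding the two helpers
 * above rely on: the stable_node pointer and the PAGE_MAPPING_KSM bits
 * share the one page->mapping word, so a reader can both classify the
 * page and recover its node:
 *
 *	void *mapping = READ_ONCE(page->mapping);
 *
 *	if (((unsigned long)mapping & PAGE_MAPPING_FLAGS) ==
 *	    PAGE_MAPPING_KSM)
 *		stable_node = (void *)((unsigned long)mapping &
 *				       ~PAGE_MAPPING_FLAGS);
 */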
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * Only called through the sysfs control interface:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static int remove_stable_node(struct stable_node *stable_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * get_ksm_page did remove_node_from_stable_tree itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
	 * The page could still be mapped if this races with __mmput()
	 * running in between ksm_exit() and exit_mmap(). Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (!page_mapped(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * The stable node did not yet appear stale to get_ksm_page(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * since that allows for an unmapped ksm page to be recognized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * right up until it is freed; but the node is safe to remove.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * This page might be in a pagevec waiting to be freed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * or it might be PageSwapCache (perhaps under writeback),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * or it might have been removed from swapcache a moment ago.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) set_page_stable_node(page, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) remove_node_from_stable_tree(stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
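/*
 * Returns true if removal failed because some ksm page in the chain is
 * still mapped and busy, false once the plain node, or every dup in the
 * chain, has been removed. Note the inverted sense relative to
 * remove_stable_node() above, which returns 0 on success and -EBUSY when
 * busy.
 */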
static bool remove_stable_node_chain(struct stable_node *stable_node,
				     struct rb_root *root)
{
	struct stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		return remove_stable_node(stable_node);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) hlist_for_each_entry_safe(dup, hlist_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) &stable_node->hlist, hlist_dup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) VM_BUG_ON(!is_stable_node_dup(dup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (remove_stable_node(dup))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) BUG_ON(!hlist_empty(&stable_node->hlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) free_stable_node_chain(stable_node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static int remove_all_stable_nodes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct stable_node *stable_node, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) for (nid = 0; nid < ksm_nr_node_ids; nid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) while (root_stable_tree[nid].rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) stable_node = rb_entry(root_stable_tree[nid].rb_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct stable_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (remove_stable_node_chain(stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) root_stable_tree + nid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) break; /* proceed to next nid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (remove_stable_node(stable_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static int unmerge_and_remove_all_rmap_items(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct mm_slot *mm_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) spin_lock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct mm_slot, mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) for (mm_slot = ksm_scan.mm_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mm = mm_slot->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) for (vma = mm->mmap; vma; vma = vma->vm_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (ksm_test_exit(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) err = unmerge_ksm_pages(vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) vma->vm_start, vma->vm_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) spin_lock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct mm_slot, mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (ksm_test_exit(mm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) hash_del(&mm_slot->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) list_del(&mm_slot->mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) free_mm_slot(mm_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) clear_bit(MMF_VM_MERGEABLE, &mm->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* Clean up stable nodes, but don't worry if some are still busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) remove_all_stable_nodes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ksm_scan.seqnr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) spin_lock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ksm_scan.mm_slot = &ksm_mm_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) #endif /* CONFIG_SYSFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static u32 calc_checksum(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) u32 checksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) void *addr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) checksum = xxhash(addr, PAGE_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) kunmap_atomic(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return checksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
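
/*
 * calc_checksum() is a cheap fingerprint of the page contents: the
 * scanner (cmp_and_merge_page(), later in this file) only inserts a page
 * into the unstable tree once this checksum has been seen unchanged
 * since the previous scan, so frequently-written pages are skipped
 * before any full page comparison is attempted.
 */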
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static int write_protect_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) pte_t *orig_pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct page_vma_mapped_walk pvmw = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .page = page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .vma = vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) int swapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct mmu_notifier_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) pvmw.address = page_address_in_vma(page, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (pvmw.address == -EFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) BUG_ON(PageTransCompound(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) pvmw.address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) pvmw.address + PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) mmu_notifier_invalidate_range_start(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!page_vma_mapped_walk(&pvmw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) goto out_mn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) mm_tlb_flush_pending(mm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) pte_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) swapped = PageSwapCache(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) flush_cache_page(vma, pvmw.address, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /*
		 * Ok this is tricky: when get_user_pages_fast() runs it
		 * doesn't take any lock, so the check we are about to make,
		 * comparing the page count against the map count, is racy:
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the TLB before the check;
		 * this assures us that no O_DIRECT can start after the
		 * check, or be in the middle of the check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * No need to notify as we are downgrading page table to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * only not changing it to point to a new page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * See Documentation/vm/mmu_notifier.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * Check that no O_DIRECT or similar I/O is in progress on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) */
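		/*
		 * Expected references at this point: each pte mapping is
		 * counted in page_mapcount(), the "+ 1" is our caller's
		 * reference from the scan, and "+ swapped" covers the swap
		 * cache reference. E.g. a page mapped once, held by us and
		 * in swapcache must have page_count() == 1 + 1 + 1; anything
		 * more means a hidden user such as O_DIRECT, so restore the
		 * pte and back out.
		 */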
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (page_mapcount(page) + 1 + swapped != page_count(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) set_pte_at(mm, pvmw.address, pvmw.pte, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (pte_dirty(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (pte_protnone(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) entry = pte_mkclean(pte_clear_savedwrite(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) entry = pte_mkclean(pte_wrprotect(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) *orig_pte = *pvmw.pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) page_vma_mapped_walk_done(&pvmw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) out_mn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) mmu_notifier_invalidate_range_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * replace_page - replace page in vma by new ksm page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * @vma: vma that holds the pte pointing to page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * @page: the page we are replacing by kpage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * @kpage: the ksm page we replace page by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * @orig_pte: the original value of the pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * Returns 0 on success, -EFAULT on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int replace_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct page *kpage, pte_t orig_pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) pte_t newpte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) spinlock_t *ptl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct mmu_notifier_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) addr = page_address_in_vma(page, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (addr == -EFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) pmd = mm_find_pmd(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) addr + PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) mmu_notifier_invalidate_range_start(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
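	/*
	 * Re-check the pte under its lock: orig_pte was sampled by
	 * write_protect_page() with this same lock held, so any fault,
	 * unmap or write in between changes the pte and we must back out
	 * rather than replace a page our earlier checks no longer cover.
	 */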
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (!pte_same(*ptep, orig_pte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) goto out_mn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * No need to check ksm_use_zero_pages here: we can only have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * zero_page here if ksm_use_zero_pages was enabled already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (!is_zero_pfn(page_to_pfn(kpage))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) get_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) page_add_anon_rmap(kpage, vma, addr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) newpte = mk_pte(kpage, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) vma->vm_page_prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * We're replacing an anonymous page with a zero page, which is
		 * not anonymous. We need to do proper accounting, otherwise we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * will get wrong values in /proc, and a BUG message in dmesg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * when tearing down the mm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) dec_mm_counter(mm, MM_ANONPAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) flush_cache_page(vma, addr, pte_pfn(*ptep));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * No need to notify as we are replacing a read only page with another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * read only page with the same content.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * See Documentation/vm/mmu_notifier.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ptep_clear_flush(vma, addr, ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) set_pte_at_notify(mm, addr, ptep, newpte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) page_remove_rmap(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!page_mapped(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) try_to_free_swap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) pte_unmap_unlock(ptep, ptl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) out_mn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) mmu_notifier_invalidate_range_end(&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * try_to_merge_one_page - take two pages and merge them into one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * @vma: the vma that holds the pte pointing to page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * @page: the PageAnon page that we want to replace with kpage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * @kpage: the PageKsm page that we want to map instead of page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * or NULL the first time when we want to use page as kpage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * This function returns 0 if the pages were merged, -EFAULT otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static int try_to_merge_one_page(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct page *page, struct page *kpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) pte_t orig_pte = __pte(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (page == kpage) /* ksm page forked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!PageAnon(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * We need the page lock to read a stable PageSwapCache in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * write_protect_page(). We use trylock_page() instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * lock_page() because we don't want to wait here - we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * prefer to continue scanning and merging different pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * then come back to this page when it is unlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (!trylock_page(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (PageTransCompound(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (split_huge_page(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * If this anonymous page is mapped only here, its pte may need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * to be write-protected. If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected. But in either
	 * case, we need to lock and check that the page count is not raised.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (write_protect_page(vma, page, &orig_pte) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!kpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * While we hold page lock, upgrade page from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * PageAnon+anon_vma to PageKsm+NULL stable_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * stable_tree_insert() will update stable_node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) set_page_stable_node(page, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) mark_page_accessed(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * Page reclaim just frees a clean page with no dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * ptes: make sure that the ksm page would be swapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!PageDirty(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) SetPageDirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) } else if (pages_identical(page, kpage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) err = replace_page(vma, page, kpage, orig_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
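	/*
	 * For a VM_LOCKED vma the mlock must follow the merge: the old
	 * page is munlocked and the ksm page takes over the mlock, keeping
	 * the unevictable accounting correct. Note the lock juggling: kpage
	 * must be locked before mlock_vma_page(), so the final
	 * unlock_page() at out_unlock may act on kpage rather than page.
	 */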
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) munlock_vma_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (!PageMlocked(kpage)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) lock_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) mlock_vma_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) page = kpage; /* for final unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * but no new kernel page is allocated: kpage must already be a ksm page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * This function returns 0 if the pages were merged, -EFAULT otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct page *page, struct page *kpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct mm_struct *mm = rmap_item->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) vma = find_mergeable_vma(mm, rmap_item->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (!vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) err = try_to_merge_one_page(vma, page, kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* Unstable nid is in union with stable anon_vma: remove first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) remove_rmap_item_from_tree(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /* Must get reference to anon_vma while still holding mmap_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) rmap_item->anon_vma = vma->anon_vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) get_anon_vma(vma->anon_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * try_to_merge_two_pages - take two identical pages and prepare them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * to be merged into one page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * This function returns the kpage if we successfully merged two identical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * pages into one ksm page, NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * Note that this function upgrades page to ksm page: if one of the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * is already a ksm page, try_to_merge_with_ksm_page should be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct rmap_item *tree_rmap_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct page *tree_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) err = try_to_merge_with_ksm_page(tree_rmap_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) tree_page, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * If that fails, we have a ksm page with only one pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * pointing to it: so break it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) break_cow(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return err ? NULL : page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
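
/*
 * A note on the asymmetry above: the first try_to_merge_with_ksm_page()
 * call, with a NULL kpage, upgrades "page" in place into a ksm page; the
 * second maps tree_page's pte to that new ksm page. Only when the second
 * step fails is break_cow() needed, to give the lone rmap_item back a
 * private writable copy.
 */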
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) VM_BUG_ON(stable_node->rmap_hlist_len < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * Check that at least one mapping still exists, otherwise
	 * there's not much point in merging and sharing with this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * stable_node, as the underlying tree_page of the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * sharer is going to be freed soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return stable_node->rmap_hlist_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
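
/*
 * A worked instance of the bound above, assuming the default
 * ksm_max_page_sharing of 256: a dup with rmap_hlist_len == 255 still
 * passes __is_page_sharing_candidate(dup, 0) (255 + 0 < 256) and can
 * take one more merge, but fails with offset 1 (255 + 1 == 256).
 * Callers pass offset 1 when one merge is already underway and the dup
 * must be able to accept yet another sharer afterwards.
 */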
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) bool is_page_sharing_candidate(struct stable_node *stable_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return __is_page_sharing_candidate(stable_node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct stable_node **_stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct rb_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) bool prune_stale_stable_nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct hlist_node *hlist_safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct page *_tree_page, *tree_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) int nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int found_rmap_hlist_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!prune_stale_stable_nodes ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) time_before(jiffies, stable_node->chain_prune_time +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) msecs_to_jiffies(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) ksm_stable_node_chains_prune_millisecs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) prune_stale_stable_nodes = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) stable_node->chain_prune_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) hlist_for_each_entry_safe(dup, hlist_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) &stable_node->hlist, hlist_dup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * We must walk all stable_node_dup to prune the stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * stable nodes during lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * get_ksm_page can drop the nodes from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * stable_node->hlist if they point to freed pages
		 * (that's why we do a _safe walk). The "dup"
		 * stable_node itself will be freed from under us
		 * if get_ksm_page() returns NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (!_tree_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) nr += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (is_page_sharing_candidate(dup)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (!found ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dup->rmap_hlist_len > found_rmap_hlist_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) put_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) found = dup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) found_rmap_hlist_len = found->rmap_hlist_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) tree_page = _tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* skip put_page for found dup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (!prune_stale_stable_nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) put_page(_tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * nr is counting all dups in the chain only if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * prune_stale_stable_nodes is true, otherwise we may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * break the loop at nr == 1 even if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * multiple entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (prune_stale_stable_nodes && nr == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /*
			 * If there were more than one entry, collapsing
			 * the chain here would corrupt memory: better to
			 * BUG_ON. In the ksmd context, with no lock held,
			 * it's not even fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) BUG_ON(stable_node->hlist.first->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) * There's just one entry and it is below the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * deduplication limit so drop the chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) rb_replace_node(&stable_node->node, &found->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) free_stable_node(stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ksm_stable_node_chains--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ksm_stable_node_dups--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * NOTE: the caller depends on the stable_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * to be equal to stable_node_dup if the chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * was collapsed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) *_stable_node = found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /*
			 * Just for robustness, as stable_node is
			 * otherwise left as a stale pointer: the
			 * compiler should optimize this away at
			 * build time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) stable_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) } else if (stable_node->hlist.first != &found->hlist_dup &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) __is_page_sharing_candidate(found, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * If the found stable_node dup can accept one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * more future merge (in addition to the one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * that is underway) and is not at the head of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * the chain, put it there so next search will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * be quicker in the !prune_stale_stable_nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * NOTE: it would be inaccurate to use nr > 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * instead of checking the hlist.first pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * directly, because in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * prune_stale_stable_nodes case "nr" isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * the position of the found dup in the chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * but the total number of dups in the chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) hlist_del(&found->hlist_dup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) hlist_add_head(&found->hlist_dup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) &stable_node->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) *_stable_node_dup = found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) return tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (!is_stable_node_chain(stable_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (hlist_empty(&stable_node->hlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) free_stable_node_chain(stable_node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return hlist_entry(stable_node->hlist.first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) typeof(*stable_node), hlist_dup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*
 * As with get_ksm_page, this function can free the *_stable_node and
 * *_stable_node_dup if the returned tree_page is NULL.
 *
 * It can also free and overwrite *_stable_node with the found
 * stable_node_dup if the chain is collapsed (in which case
 * *_stable_node will be equal to *_stable_node_dup, as if the chain
 * had never existed). It's up to the caller to verify tree_page is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * NULL before dereferencing *_stable_node or *_stable_node_dup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * *_stable_node_dup is really a second output parameter of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * function and will be overwritten in all cases, the caller doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * need to initialize it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct stable_node **_stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct rb_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) bool prune_stale_stable_nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) struct stable_node *stable_node = *_stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (!is_stable_node_chain(stable_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (is_page_sharing_candidate(stable_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) *_stable_node_dup = stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * _stable_node_dup set to NULL means the stable_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * reached the ksm_max_page_sharing limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) *_stable_node_dup = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return stable_node_dup(_stable_node_dup, _stable_node, root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) prune_stale_stable_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct stable_node **s_n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return __stable_node_chain(s_n_d, s_n, root, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static __always_inline struct page *chain(struct stable_node **s_n_d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct stable_node *s_n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct stable_node *old_stable_node = s_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct page *tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /* not pruning dups so s_n cannot have changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) VM_BUG_ON(s_n != old_stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
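/*
* A minimal usage sketch (names as in stable_tree_search() and
* stable_tree_insert() below):
*
*	stable_node = rb_entry(*new, struct stable_node, node);
*	tree_page = chain_prune(&stable_node_dup, &stable_node, root);
*
* The search path uses chain_prune(), which may collapse a single-dup
* chain and rewrite stable_node; the insert path uses chain(), which
* never prunes, so the VM_BUG_ON there can assert that the chain head
* is unchanged.
*/
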
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * stable_tree_search - search for page inside the stable tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * This function checks if there is a page inside the stable tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * with identical content to the page that we are scanning right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) *
* This function returns the ksm page holding the identical content if
* found, NULL otherwise. It can also return ERR_PTR(-EBUSY) when the
* matching page is momentarily locked, e.g. for migration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static struct page *stable_tree_search(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct rb_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct rb_node **new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct rb_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct stable_node *page_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) page_node = page_stable_node(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (page_node && page_node->head != &migrate_nodes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /* ksm page forked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) nid = get_kpfn_nid(page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) root = root_stable_tree + nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) new = &root->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct page *tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) stable_node = rb_entry(*new, struct stable_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) stable_node_any = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) tree_page = chain_prune(&stable_node_dup, &stable_node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * NOTE: stable_node may have been freed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * chain_prune() if the returned stable_node_dup is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * not NULL. stable_node_dup may have been inserted in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * the rbtree instead as a regular stable_node (in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * order to collapse the stable_node chain if a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * stable_node dup was found in it). In such case the
* stable_node is overwritten by the callee to point
* to the stable_node_dup that was collapsed in the
* stable rbtree and stable_node will be equal to
* stable_node_dup, as if the chain had never existed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!stable_node_dup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * Either all stable_node dups were full in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * this stable_node chain, or this chain was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * empty and should be rb_erased.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) stable_node_any = stable_node_dup_any(stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (!stable_node_any) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* rb_erase just run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /*
* Take the page of any of the stable_node dups in
* this stable_node chain to let the tree walk
* continue. All KSM pages belonging to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * stable_node dups in a stable_node chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * have the same content and they're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) * write protected at all times. Any will work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * fine to continue the walk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) tree_page = get_ksm_page(stable_node_any,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) GET_KSM_PAGE_NOLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
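/*
* Exactly one of stable_node_dup and stable_node_any is set at
* this point: _dup when the node (or the collapsed chain) could
* be used directly, _any when we fell back to an arbitrary dup
* just to keep walking.
*/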
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (!tree_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * If we walked over a stale stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * get_ksm_page() will call rb_erase() and it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * may rebalance the tree from under us. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * restart the search from scratch. Returning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * NULL would be safe too, but we'd generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * false negative insertions just because some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * stable_node was stale.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ret = memcmp_pages(page, tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) put_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) new = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) new = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (page_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) VM_BUG_ON(page_node->head != &migrate_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * Test if the migrated page should be merged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * into a stable node dup. If the mapcount is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * 1 we can migrate it with another KSM page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * without adding it to the chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (page_mapcount(page) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) goto chain_append;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (!stable_node_dup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * If the stable_node is a chain and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * we got a payload match in memcmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * but we cannot merge the scanned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * page in any of the existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * stable_node dups because they're
* all full, we need to wait for the
* scanned page to find itself a match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * in the unstable tree to create a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * brand new KSM page to add later to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * the dups of this stable_node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * Lock and unlock the stable_node's page (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * might already have been migrated) so that page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * migration is sure to notice its raised count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * It would be more elegant to return stable_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * than kpage, but that involves more changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) tree_page = get_ksm_page(stable_node_dup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) GET_KSM_PAGE_TRYLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (PTR_ERR(tree_page) == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (unlikely(!tree_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * The tree may have been rebalanced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * so re-evaluate parent and new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) unlock_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (get_kpfn_nid(stable_node_dup->kpfn) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) NUMA(stable_node_dup->nid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) put_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) goto replace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
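/*
* The walk fell off the tree: no identical page exists. If the
* scanned page is itself a migrated KSM page (page_node still on
* the migrate_nodes list), link it back into the stable tree at
* the position just computed; otherwise there is nothing to do.
*/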
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (!page_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) list_del(&page_node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) DO_NUMA(page_node->nid = nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) rb_link_node(&page_node->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) rb_insert_color(&page_node->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (is_page_sharing_candidate(page_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) replace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * If stable_node was a chain and chain_prune collapsed it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * stable_node has been updated to be the new regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * stable_node. A collapse of the chain is indistinguishable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * from the case there was no chain in the stable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * rbtree. Otherwise stable_node is the chain and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * stable_node_dup is the dup to replace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (stable_node_dup == stable_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) VM_BUG_ON(is_stable_node_chain(stable_node_dup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) VM_BUG_ON(is_stable_node_dup(stable_node_dup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /* there is no chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (page_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) VM_BUG_ON(page_node->head != &migrate_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) list_del(&page_node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) DO_NUMA(page_node->nid = nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) rb_replace_node(&stable_node_dup->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) &page_node->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (is_page_sharing_candidate(page_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) rb_erase(&stable_node_dup->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) VM_BUG_ON(!is_stable_node_chain(stable_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) __stable_node_dup_del(stable_node_dup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (page_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) VM_BUG_ON(page_node->head != &migrate_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) list_del(&page_node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) DO_NUMA(page_node->nid = nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) stable_node_chain_add_dup(page_node, stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (is_page_sharing_candidate(page_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) stable_node_dup->head = &migrate_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) list_add(&stable_node_dup->list, stable_node_dup->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) chain_append:
/* stable_node_dup could be NULL if it reached the sharing limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (!stable_node_dup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) stable_node_dup = stable_node_any;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * If stable_node was a chain and chain_prune collapsed it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * stable_node has been updated to be the new regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) * stable_node. A collapse of the chain is indistinguishable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * from the case there was no chain in the stable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * rbtree. Otherwise stable_node is the chain and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) * stable_node_dup is the dup to replace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (stable_node_dup == stable_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) VM_BUG_ON(is_stable_node_chain(stable_node_dup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) VM_BUG_ON(is_stable_node_dup(stable_node_dup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* chain is missing so create it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) stable_node = alloc_stable_node_chain(stable_node_dup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (!stable_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
/*
* Add this stable_node dup that was migrated to the
* stable_node chain of the current nid for this page
* content.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) VM_BUG_ON(!is_stable_node_chain(stable_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) VM_BUG_ON(page_node->head != &migrate_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) list_del(&page_node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) DO_NUMA(page_node->nid = nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) stable_node_chain_add_dup(page_node, stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * stable_tree_insert - insert stable tree node pointing to new ksm page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * into the stable tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * This function returns the stable tree node just allocated on success,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) static struct stable_node *stable_tree_insert(struct page *kpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) unsigned long kpfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) struct rb_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) struct rb_node **new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) struct rb_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) bool need_chain = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) kpfn = page_to_pfn(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) nid = get_kpfn_nid(kpfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) root = root_stable_tree + nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) new = &root->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct page *tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) stable_node = rb_entry(*new, struct stable_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) stable_node_any = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) tree_page = chain(&stable_node_dup, stable_node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (!stable_node_dup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * Either all stable_node dups were full in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * this stable_node chain, or this chain was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * empty and should be rb_erased.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) stable_node_any = stable_node_dup_any(stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (!stable_node_any) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) /* rb_erase just run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /*
* Take the page of any of the stable_node dups in
* this stable_node chain to let the tree walk
* continue. All KSM pages belonging to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * stable_node dups in a stable_node chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * have the same content and they're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * write protected at all times. Any will work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * fine to continue the walk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) tree_page = get_ksm_page(stable_node_any,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) GET_KSM_PAGE_NOLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (!tree_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * If we walked over a stale stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * get_ksm_page() will call rb_erase() and it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * may rebalance the tree from under us. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * restart the search from scratch. Returning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * NULL would be safe too, but we'd generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * false negative insertions just because some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * stable_node was stale.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) ret = memcmp_pages(kpage, tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) put_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) new = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) new = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) need_chain = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
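/*
* Whether or not an identical page was found, kpage needs a dup
* node of its own: if need_chain it will hang off a chain head
* next to the colliding node, otherwise it becomes a regular
* leaf of the stable rbtree.
*/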
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) stable_node_dup = alloc_stable_node();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (!stable_node_dup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) INIT_HLIST_HEAD(&stable_node_dup->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) stable_node_dup->kpfn = kpfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) set_page_stable_node(kpage, stable_node_dup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) stable_node_dup->rmap_hlist_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) DO_NUMA(stable_node_dup->nid = nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (!need_chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) rb_link_node(&stable_node_dup->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) rb_insert_color(&stable_node_dup->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (!is_stable_node_chain(stable_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) struct stable_node *orig = stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /* chain is missing so create it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) stable_node = alloc_stable_node_chain(orig, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (!stable_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) free_stable_node(stable_node_dup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) stable_node_chain_add_dup(stable_node_dup, stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return stable_node_dup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * unstable_tree_search_insert - search for identical page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * else insert rmap_item into the unstable tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * This function searches for a page in the unstable tree identical to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * page currently being scanned; and if no identical page is found in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * tree, we insert rmap_item as a new object into the unstable tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * This function returns pointer to rmap_item found to be identical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * to the currently scanned page, NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * This function does both searching and inserting, because they share
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * the same walking algorithm in an rbtree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct page **tree_pagep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct rb_node **new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) struct rb_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) nid = get_kpfn_nid(page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) root = root_unstable_tree + nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) new = &root->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) struct rmap_item *tree_rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) struct page *tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) tree_rmap_item = rb_entry(*new, struct rmap_item, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) tree_page = get_mergeable_page(tree_rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (!tree_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * Don't substitute a ksm page for a forked page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (page == tree_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) put_user_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) ret = memcmp_pages(page, tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) put_user_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) new = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) } else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) put_user_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) new = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) } else if (!ksm_merge_across_nodes &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) page_to_nid(tree_page) != nid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /*
* If tree_page has been migrated to another NUMA node,
* it will be flushed out and put in the right unstable
* tree next time: only merge with it when merging
* across nodes is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) put_user_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) *tree_pagep = tree_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) return tree_rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
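/*
* No identical page was found: insert the scanned page's rmap_item.
* The low bits of address double as flag bits and as a stamp of the
* current scan's seqnr, since the unstable tree is rebuilt from
* scratch on every full scan (see the root_unstable_tree reset in
* scan_get_next_rmap_item()).
*/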
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) rmap_item->address |= UNSTABLE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) DO_NUMA(rmap_item->nid = nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) rb_link_node(&rmap_item->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) rb_insert_color(&rmap_item->node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ksm_pages_unshared++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * stable_tree_append - add another rmap_item to the linked list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * rmap_items hanging off a given node of the stable tree, all sharing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * the same ksm page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static void stable_tree_append(struct rmap_item *rmap_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) struct stable_node *stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) bool max_page_sharing_bypass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /*
* rmap won't find this mapping if we don't insert the
* rmap_item in the right stable_node duplicate.
* page_migration could break later if rmap breaks, so
* we might as well crash here. We really need to check
* for rmap_hlist_len == STABLE_NODE_CHAIN, but we may
* as well check for any other negative value: an
* underflow, if detected here for the first time (and
* not when decreasing rmap_hlist_len), would be a sign
* of memory corruption in the stable_node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) BUG_ON(stable_node->rmap_hlist_len < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) stable_node->rmap_hlist_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (!max_page_sharing_bypass)
/* possibly non-fatal but unexpected overflow; only warn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) WARN_ON_ONCE(stable_node->rmap_hlist_len >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) ksm_max_page_sharing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) rmap_item->head = stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) rmap_item->address |= STABLE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
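/*
* Accounting: the first rmap_item on a stable_node's hlist is
* counted in ksm_pages_shared (one per ksm page); every further
* sharer of that page counts in ksm_pages_sharing instead.
*/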
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (rmap_item->hlist.next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) ksm_pages_sharing++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) ksm_pages_shared++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * cmp_and_merge_page - first see if page can be merged into the stable tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * if not, compare checksum to previous and if it's the same, see if page can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * be inserted into the unstable tree, or merged with a page already there and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) * both transferred to the stable tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) *
* @page: the page that we are searching for an identical page to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * @rmap_item: the reverse mapping into the virtual address of this page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) struct mm_struct *mm = rmap_item->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct rmap_item *tree_rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct page *tree_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) struct stable_node *stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct page *kpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) unsigned int checksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) bool max_page_sharing_bypass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) stable_node = page_stable_node(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (stable_node) {
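/*
* The page may have been migrated to another NUMA node since
* stable_node was created: if its kpfn no longer matches the
* node it is filed under, unhang the node from its tree/chain
* and park it on migrate_nodes so that stable_tree_search()
* can re-home it.
*/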
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (stable_node->head != &migrate_nodes &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) NUMA(stable_node->nid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) stable_node_dup_del(stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) stable_node->head = &migrate_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) list_add(&stable_node->list, stable_node->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (stable_node->head != &migrate_nodes &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) rmap_item->head == stable_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * If it's a KSM fork, allow it to go over the sharing limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * without warnings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (!is_page_sharing_candidate(stable_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) max_page_sharing_bypass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) /* We first start with searching the page inside the stable tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) kpage = stable_tree_search(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (kpage == page && rmap_item->head == stable_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) put_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) remove_rmap_item_from_tree(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (kpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (PTR_ERR(kpage) == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * The page was successfully merged:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * add its rmap_item to the stable tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) lock_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) stable_tree_append(rmap_item, page_stable_node(kpage),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) max_page_sharing_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) unlock_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) put_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * If the hash value of the page has changed from the last time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * we calculated it, this page is changing frequently: therefore we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * don't want to insert it in the unstable tree, and we don't want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * to waste our time searching for something identical to it there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) checksum = calc_checksum(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (rmap_item->oldchecksum != checksum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) rmap_item->oldchecksum = checksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * Same checksum as an empty page. We attempt to merge it with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) * appropriate zero page if the user enabled this via sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (ksm_use_zero_pages && (checksum == zero_checksum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) vma = find_mergeable_vma(mm, rmap_item->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) err = try_to_merge_one_page(vma, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) ZERO_PAGE(rmap_item->address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * If the vma is out of date, we do not need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * In case of failure, the page was not really empty, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * need to continue. Otherwise we're done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) tree_rmap_item =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) unstable_tree_search_insert(rmap_item, page, &tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (tree_rmap_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) bool split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) kpage = try_to_merge_two_pages(rmap_item, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) tree_rmap_item, tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * If both pages we tried to merge belong to the same compound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * page, then we actually ended up increasing the reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * count of the same compound page twice, and split_huge_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * Here we set a flag if that happened, and we use it later to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) * try split_huge_page again. Since we call put_page right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) * afterwards, the reference count will be correct and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) * split_huge_page should succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) */
split = PageTransCompound(page) &&
compound_head(page) == compound_head(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) put_user_page(tree_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (kpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * The pages were successfully merged: insert new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * node in the stable tree and add both rmap_items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) lock_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) stable_node = stable_tree_insert(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (stable_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) stable_tree_append(tree_rmap_item, stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) stable_tree_append(rmap_item, stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) unlock_page(kpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * If we fail to insert the page into the stable tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * we will have 2 virtual addresses that are pointing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * to a ksm page left outside the stable tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * in which case we need to break_cow on both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (!stable_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) break_cow(tree_rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) break_cow(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) } else if (split) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) * We are here if we tried to merge two pages and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * failed because they both belonged to the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * compound page. We will split the page now, but no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * merging will take place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * We do not want to add the cost of a full lock; if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * the page is locked, it is better to skip it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * perhaps try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (!trylock_page(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) split_huge_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
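/*
* get_next_rmap_item - return the rmap_item tracking addr in this
* mm_slot's address-ordered rmap_list, allocating a fresh one if none
* exists yet. Items for lower addresses that no longer correspond to a
* scanned page are unlinked from their tree and freed along the way.
*/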
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct rmap_item **rmap_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) struct rmap_item *rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) while (*rmap_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) rmap_item = *rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if ((rmap_item->address & PAGE_MASK) == addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (rmap_item->address > addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) *rmap_list = rmap_item->rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) remove_rmap_item_from_tree(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) free_rmap_item(rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) rmap_item = alloc_rmap_item();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (rmap_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /* It has already been zeroed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) rmap_item->mm = mm_slot->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) rmap_item->address = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) rmap_item->rmap_list = *rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) *rmap_list = rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
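/*
* scan_get_next_rmap_item - advance the global ksm_scan cursor to the
* next anonymous page in the next VM_MERGEABLE vma, moving on to the
* next mm_slot (and starting a fresh full scan) as each mm is
* exhausted. Returns the rmap_item for that page with *page referenced,
* or NULL when there is nothing left to scan.
*/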
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) static struct rmap_item *scan_get_next_rmap_item(struct page **page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) struct mm_slot *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) struct rmap_item *rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (list_empty(&ksm_mm_head.mm_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) slot = ksm_scan.mm_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (slot == &ksm_mm_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) /*
* A number of pages can hang around indefinitely in per-cpu
* pagevecs, their raised page count preventing write_protect_page
* from merging them. Though it doesn't really matter much,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * it is puzzling to see some stuck in pages_volatile until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * other activity jostles them out, and they also prevented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * LTP's KSM test from succeeding deterministically; so drain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * them here (here rather than on entry to ksm_do_scan(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * so we don't IPI too often when pages_to_scan is set low).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) lru_add_drain_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) * Whereas stale stable_nodes on the stable_tree itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * get pruned in the regular course of stable_tree_search(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * those moved out to the migrate_nodes list can accumulate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * so prune them once before each full scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (!ksm_merge_across_nodes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) struct stable_node *stable_node, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) list_for_each_entry_safe(stable_node, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) &migrate_nodes, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) page = get_ksm_page(stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) GET_KSM_PAGE_NOLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
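/*
* The unstable tree doesn't survive a full scan: page contents (and
* hence tree positions) may change at any time since these pages are
* not write protected, so the tree is simply rebuilt from scratch on
* every pass.
*/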
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) for (nid = 0; nid < ksm_nr_node_ids; nid++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) root_unstable_tree[nid] = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) spin_lock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) ksm_scan.mm_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) * Although we tested list_empty() above, a racing __ksm_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * of the last mm on the list may have removed it since then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) if (slot == &ksm_mm_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) next_mm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ksm_scan.address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) ksm_scan.rmap_list = &slot->rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) mm = slot->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (ksm_test_exit(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) vma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) vma = find_vma(mm, ksm_scan.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) for (; vma; vma = vma->vm_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (!(vma->vm_flags & VM_MERGEABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (ksm_scan.address < vma->vm_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) ksm_scan.address = vma->vm_start;
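/*
* A vma without an anon_vma has never had anonymous pages
* mapped into it, so there is nothing mergeable to scan:
* skip straight to its end.
*/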
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (!vma->anon_vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) ksm_scan.address = vma->vm_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) while (ksm_scan.address < vma->vm_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (ksm_test_exit(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) *page = follow_page(vma, ksm_scan.address, FOLL_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (IS_ERR_OR_NULL(*page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) ksm_scan.address += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (PageAnon(*page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) flush_anon_page(vma, *page, ksm_scan.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) flush_dcache_page(*page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) rmap_item = get_next_rmap_item(slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) ksm_scan.rmap_list, ksm_scan.address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (rmap_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) ksm_scan.rmap_list =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) &rmap_item->rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) ksm_scan.address += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) put_page(*page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) put_page(*page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) ksm_scan.address += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) if (ksm_test_exit(mm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) ksm_scan.address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) ksm_scan.rmap_list = &slot->rmap_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) * Nuke all the rmap_items that are above this current rmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) * because there were no VM_MERGEABLE vmas with such addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) spin_lock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) ksm_scan.mm_slot = list_entry(slot->mm_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) struct mm_slot, mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (ksm_scan.address == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * We've completed a full scan of all vmas, holding mmap_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * throughout, and found no VM_MERGEABLE: so do the same as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * __ksm_exit does to remove this mm from all our lists now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * This applies either when cleaning up after __ksm_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * (but beware: we can reach here even before __ksm_exit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * or when all VM_MERGEABLE areas have been unmapped (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) * mmap_lock then protects against race with MADV_MERGEABLE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) hash_del(&slot->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) list_del(&slot->mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) free_mm_slot(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) clear_bit(MMF_VM_MERGEABLE, &mm->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) * mmap_read_unlock(mm) must come first: once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * already have been freed under us by __ksm_exit(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) * because the "mm_slot" is still hashed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) * ksm_scan.mm_slot doesn't point to it anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /* Repeat until we've completed scanning the whole list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) slot = ksm_scan.mm_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if (slot != &ksm_mm_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) goto next_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ksm_scan.seqnr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) * ksm_do_scan - the ksm scanner main worker function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * @scan_npages: number of pages we want to scan before we return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static void ksm_do_scan(unsigned int scan_npages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) struct rmap_item *rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) while (scan_npages-- && likely(!freezing(current))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) rmap_item = scan_get_next_rmap_item(&page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (!rmap_item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) cmp_and_merge_page(page, rmap_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) static int ksmd_should_run(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) static int ksm_scan_thread(void *nothing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) unsigned int sleep_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) set_freezable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) set_user_nice(current, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) mutex_lock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) wait_while_offlining();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (ksmd_should_run())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) ksm_do_scan(ksm_thread_pages_to_scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) mutex_unlock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) try_to_freeze();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
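/*
 * Between batches: either doze for sleep_millisecs, waking early via
 * ksm_iter_wait if the interval is retuned through sysfs, or, with
 * nothing to scan, sleep until __ksm_enter() or run_store() wakes us.
 */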
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (ksmd_should_run()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) wait_event_interruptible_timeout(ksm_iter_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) msecs_to_jiffies(sleep_ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) wait_event_freezable(ksm_thread_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) ksmd_should_run() || kthread_should_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) unsigned long end, int advice, unsigned long *vm_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) switch (advice) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) case MADV_MERGEABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * Be somewhat over-protective for now!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) VM_PFNMAP | VM_IO | VM_DONTEXPAND |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) VM_HUGETLB | VM_MIXEDMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) return 0; /* just ignore the advice */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (vma_is_dax(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) #ifdef VM_SAO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) if (*vm_flags & VM_SAO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) #ifdef VM_SPARC_ADI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (*vm_flags & VM_SPARC_ADI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) err = __ksm_enter(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) *vm_flags |= VM_MERGEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) case MADV_UNMERGEABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (!(*vm_flags & VM_MERGEABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return 0; /* just ignore the advice */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (vma->anon_vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) err = unmerge_ksm_pages(vma, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) *vm_flags &= ~VM_MERGEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) EXPORT_SYMBOL_GPL(ksm_madvise);
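/*
 * Example (userspace sketch, not kernel code): ksm_madvise() is reached
 * from the madvise(2) syscall. A process opts an anonymous region into
 * scanning by ksmd like so:
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 *
 * and opts out again with madvise(buf, len, MADV_UNMERGEABLE), which
 * breaks COW on every merged page and may therefore fail with ENOMEM.
 */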
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) int __ksm_enter(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) struct mm_slot *mm_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) int needs_wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) mm_slot = alloc_mm_slot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (!mm_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /* Check ksm_run too? Would need tighter locking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) needs_wakeup = list_empty(&ksm_mm_head.mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) spin_lock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) insert_to_mm_slots_hash(mm, mm_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) * When ksm_run is KSM_RUN_MERGE (or KSM_RUN_STOP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) * insert just behind the scanning cursor, to let the area settle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * down a little; when fork is followed by immediate exec, we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * want ksmd to waste time setting up and tearing down an rmap_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) * scanning cursor, otherwise KSM pages in newly forked mms will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * missed: then we might as well insert at the end of the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (ksm_run & KSM_RUN_UNMERGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) set_bit(MMF_VM_MERGEABLE, &mm->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) mmgrab(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (needs_wakeup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) wake_up_interruptible(&ksm_thread_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) void __ksm_exit(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) struct mm_slot *mm_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) int easy_to_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) * This process is exiting: if it's straightforward (as is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) * case when ksmd was never running), free mm_slot immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) * But if it's at the cursor or has rmap_items linked to it, use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * mmap_lock to synchronize with any break_cows before pagetables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * are freed, and leave the mm_slot on the list for ksmd to free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) * Beware: ksm may already have noticed it exiting and freed the slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) spin_lock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) mm_slot = get_mm_slot(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (mm_slot && ksm_scan.mm_slot != mm_slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) if (!mm_slot->rmap_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) hash_del(&mm_slot->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) list_del(&mm_slot->mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) easy_to_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) list_move(&mm_slot->mm_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) &ksm_scan.mm_slot->mm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) spin_unlock(&ksm_mmlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (easy_to_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) free_mm_slot(mm_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) clear_bit(MMF_VM_MERGEABLE, &mm->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) } else if (mm_slot) {
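/*
 * Taking and dropping mmap_lock for write serializes us against any
 * break_cow() still running under mmap_lock for read, before
 * exit_mmap() frees the pagetables.
 */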
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) mmap_write_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) struct page *ksm_might_need_to_copy(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) struct vm_area_struct *vma, unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) struct anon_vma *anon_vma = page_anon_vma(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) struct page *new_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (PageKsm(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (page_stable_node(page) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) !(ksm_run & KSM_RUN_UNMERGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return page; /* no need to copy it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) } else if (!anon_vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) return page; /* no need to copy it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) } else if (anon_vma->root == vma->anon_vma->root &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) page->index == linear_page_index(vma, address)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return page; /* still no need to copy it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (!PageUptodate(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) return page; /* let do_swap_page report the error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) put_page(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) new_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) if (new_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) copy_user_highpage(new_page, page, address, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) SetPageDirty(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) __SetPageUptodate(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) __SetPageLocked(new_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) return new_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) struct stable_node *stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) struct rmap_item *rmap_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) int search_new_forks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) VM_BUG_ON_PAGE(!PageKsm(page), page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) * Rely on the page lock to protect against concurrent modifications
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) * to that page's node of the stable tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) VM_BUG_ON_PAGE(!PageLocked(page), page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) stable_node = page_stable_node(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (!stable_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) struct anon_vma *anon_vma = rmap_item->anon_vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) struct anon_vma_chain *vmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) anon_vma_lock_read(anon_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 0, ULONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) vma = vmac->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /* Ignore the stable/unstable/seqnr flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) addr = rmap_item->address & ~KSM_FLAG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) if (addr < vma->vm_start || addr >= vma->vm_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) * Initially we examine only the vma which covers this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * rmap_item; but later, if there is still work to do,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) * we examine covering vmas in other mms: in case they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) * were forked from the original since ksmd passed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) anon_vma_unlock_read(anon_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) if (rwc->done && rwc->done(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) anon_vma_unlock_read(anon_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) anon_vma_unlock_read(anon_vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (!search_new_forks++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
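/*
 * Example (kernel-internal sketch; count_one() is hypothetical): callers
 * drive rmap_walk_ksm() through an rmap_walk_control, e.g. to count the
 * mappings of a locked KSM page:
 *
 *	static bool count_one(struct page *page, struct vm_area_struct *vma,
 *			      unsigned long addr, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return true;	// keep walking
 *	}
 *
 *	unsigned long mapcount = 0;
 *	struct rmap_walk_control rwc = {
 *		.rmap_one	= count_one,
 *		.arg		= &mapcount,
 *	};
 *	rmap_walk_ksm(page, &rwc);	// page must be locked and PageKsm
 */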
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) #ifdef CONFIG_MIGRATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) void ksm_migrate_page(struct page *newpage, struct page *oldpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) struct stable_node *stable_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) stable_node = page_stable_node(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (stable_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) stable_node->kpfn = page_to_pfn(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) * newpage->mapping was set in advance; now we need smp_wmb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * to make sure that the new stable_node->kpfn is visible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * to get_ksm_page() before it can see that oldpage->mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * has gone stale (or that PageSwapCache has been cleared).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) set_page_stable_node(oldpage, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) #endif /* CONFIG_MIGRATION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) #ifdef CONFIG_MEMORY_HOTREMOVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) static void wait_while_offlining(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) while (ksm_run & KSM_RUN_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) mutex_unlock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) mutex_lock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) static bool stable_node_dup_remove_range(struct stable_node *stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) unsigned long start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) unsigned long end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) if (stable_node->kpfn >= start_pfn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) stable_node->kpfn < end_pfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) * Don't get_ksm_page, page has already gone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * which is why we keep kpfn instead of page*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) remove_node_from_stable_tree(stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) static bool stable_node_chain_remove_range(struct stable_node *stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) unsigned long start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) unsigned long end_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct stable_node *dup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct hlist_node *hlist_safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) if (!is_stable_node_chain(stable_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) VM_BUG_ON(is_stable_node_dup(stable_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) return stable_node_dup_remove_range(stable_node, start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) hlist_for_each_entry_safe(dup, hlist_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) &stable_node->hlist, hlist_dup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) VM_BUG_ON(!is_stable_node_dup(dup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) stable_node_dup_remove_range(dup, start_pfn, end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) if (hlist_empty(&stable_node->hlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) free_stable_node_chain(stable_node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) return true; /* notify caller that tree was rebalanced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) static void ksm_check_stable_tree(unsigned long start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) unsigned long end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) struct stable_node *stable_node, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) for (nid = 0; nid < ksm_nr_node_ids; nid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) node = rb_first(root_stable_tree + nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) stable_node = rb_entry(node, struct stable_node, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if (stable_node_chain_remove_range(stable_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) start_pfn, end_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) root_stable_tree +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) node = rb_first(root_stable_tree + nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) node = rb_next(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (stable_node->kpfn >= start_pfn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) stable_node->kpfn < end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) remove_node_from_stable_tree(stable_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) static int ksm_memory_callback(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) unsigned long action, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) struct memory_notify *mn = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) switch (action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) case MEM_GOING_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) * and remove_all_stable_nodes() while memory is going offline:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) * it is unsafe for them to touch the stable tree at this time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) * But unmerge_ksm_pages(), rmap lookups and other entry points
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) * which do not need the ksm_thread_mutex are all safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) mutex_lock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) ksm_run |= KSM_RUN_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) mutex_unlock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) case MEM_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * Most of the work is done by page migration; but there might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * be a few stable_nodes left over, still pointing to struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * pages which have been offlined: prune those from the tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * otherwise get_ksm_page() might later try to access a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * non-existent struct page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) ksm_check_stable_tree(mn->start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) mn->start_pfn + mn->nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) case MEM_CANCEL_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) mutex_lock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) ksm_run &= ~KSM_RUN_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) mutex_unlock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) smp_mb(); /* wake_up_bit advises this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) static void wait_while_offlining(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) #endif /* CONFIG_MEMORY_HOTREMOVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) * All of this would compile without CONFIG_SYSFS, but it would waste space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) #define KSM_ATTR_RO(_name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) #define KSM_ATTR(_name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) static struct kobj_attribute _name##_attr = \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) __ATTR(_name, 0644, _name##_show, _name##_store)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) static ssize_t sleep_millisecs_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) static ssize_t sleep_millisecs_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) unsigned long msecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) err = kstrtoul(buf, 10, &msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (err || msecs > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) ksm_thread_sleep_millisecs = msecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) wake_up_interruptible(&ksm_iter_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) KSM_ATTR(sleep_millisecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) static ssize_t pages_to_scan_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) static ssize_t pages_to_scan_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) unsigned long nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) err = kstrtoul(buf, 10, &nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (err || nr_pages > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) ksm_thread_pages_to_scan = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) KSM_ATTR(pages_to_scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) return sprintf(buf, "%lu\n", ksm_run);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) err = kstrtoul(buf, 10, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (err || flags > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (flags > KSM_RUN_UNMERGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) * breaking COW to free the pages_shared (but leaves mm_slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) * on the list for when ksmd may be set running again).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) mutex_lock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) wait_while_offlining();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if (ksm_run != flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) ksm_run = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) if (flags & KSM_RUN_UNMERGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) set_current_oom_origin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) err = unmerge_and_remove_all_rmap_items();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) clear_current_oom_origin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) ksm_run = KSM_RUN_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) count = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) mutex_unlock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (flags & KSM_RUN_MERGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) wake_up_interruptible(&ksm_thread_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) KSM_ATTR(run);
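/*
 * Example (userspace sketch): the attributes in this file appear under
 * /sys/kernel/mm/ksm/. The C equivalent of
 * "echo 1 > /sys/kernel/mm/ksm/run" to start ksmd:
 *
 *	int fd = open("/sys/kernel/mm/ksm/run", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	// KSM_RUN_MERGE
 *		close(fd);
 *	}
 *
 * sleep_millisecs and pages_to_scan are tuned the same way; writing 2
 * (KSM_RUN_UNMERGE) unmerges all pages and can take a long time.
 */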
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) #ifdef CONFIG_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) static ssize_t merge_across_nodes_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) return sprintf(buf, "%u\n", ksm_merge_across_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static ssize_t merge_across_nodes_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) unsigned long knob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) err = kstrtoul(buf, 10, &knob);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) if (knob > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) mutex_lock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) wait_while_offlining();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) if (ksm_merge_across_nodes != knob) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) if (ksm_pages_shared || remove_all_stable_nodes())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) else if (root_stable_tree == one_stable_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) struct rb_root *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) * This is the first time that we switch away from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) * default of merging across nodes: must now allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) * a buffer to hold as many roots as may be needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) * Allocate stable and unstable together:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) * MAXSMP NODES_SHIFT 10 will use 16kB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) /* RB_ROOT is all zeroes, so the kcalloc'ed buffer needs no further init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) root_stable_tree = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) root_unstable_tree = buf + nr_node_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) /* Stable tree is empty but not the unstable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) root_unstable_tree[0] = one_unstable_tree[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) ksm_merge_across_nodes = knob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) ksm_nr_node_ids = knob ? 1 : nr_node_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) mutex_unlock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return err ? err : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) KSM_ATTR(merge_across_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) static ssize_t use_zero_pages_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) return sprintf(buf, "%u\n", ksm_use_zero_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) static ssize_t use_zero_pages_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) bool value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) err = kstrtobool(buf, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) ksm_use_zero_pages = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) KSM_ATTR(use_zero_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) static ssize_t max_page_sharing_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) return sprintf(buf, "%u\n", ksm_max_page_sharing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) static ssize_t max_page_sharing_store(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) struct kobj_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) int knob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) err = kstrtoint(buf, 10, &knob);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) * When a KSM page is created it is shared by 2 mappings. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) * being a signed comparison, it implicitly verifies it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) * negative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) if (knob < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) if (READ_ONCE(ksm_max_page_sharing) == knob)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) mutex_lock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) wait_while_offlining();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) if (ksm_max_page_sharing != knob) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (ksm_pages_shared || remove_all_stable_nodes())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) ksm_max_page_sharing = knob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) mutex_unlock(&ksm_thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) return err ? err : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) KSM_ATTR(max_page_sharing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) static ssize_t pages_shared_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) return sprintf(buf, "%lu\n", ksm_pages_shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) KSM_ATTR_RO(pages_shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) static ssize_t pages_sharing_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) return sprintf(buf, "%lu\n", ksm_pages_sharing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) KSM_ATTR_RO(pages_sharing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) static ssize_t pages_unshared_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) return sprintf(buf, "%lu\n", ksm_pages_unshared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) KSM_ATTR_RO(pages_unshared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) static ssize_t pages_volatile_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) long ksm_pages_volatile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) - ksm_pages_sharing - ksm_pages_unshared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) * It was not worth any locking to calculate that statistic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) * but it might therefore sometimes be negative: conceal that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) if (ksm_pages_volatile < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) ksm_pages_volatile = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) return sprintf(buf, "%ld\n", ksm_pages_volatile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) KSM_ATTR_RO(pages_volatile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) static ssize_t stable_node_dups_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return sprintf(buf, "%lu\n", ksm_stable_node_dups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) KSM_ATTR_RO(stable_node_dups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) static ssize_t stable_node_chains_show(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) return sprintf(buf, "%lu\n", ksm_stable_node_chains);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) KSM_ATTR_RO(stable_node_chains);

static ssize_t
stable_node_chains_prune_millisecs_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
}

static ssize_t
stable_node_chains_prune_millisecs_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_stable_node_chains_prune_millisecs = msecs;

	return count;
}
KSM_ATTR(stable_node_chains_prune_millisecs);
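/*
 * Example (the value is arbitrary): stretch the interval between prune
 * passes over stale stable-node dups to five seconds:
 *
 *	# echo 5000 > /sys/kernel/mm/ksm/stable_node_chains_prune_millisecs
 *
 * Input that does not parse as a decimal fitting in an unsigned int is
 * rejected with -EINVAL by the store routine above.
 */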

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);
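/*
 * full_scans advances once per completed pass over all mergeable areas,
 * so e.g. `watch -n1 cat /sys/kernel/mm/ksm/full_scans` is a cheap way
 * to confirm that ksmd is making forward progress.
 */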

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	NULL,
};
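/*
 * For reference, the *_attr objects collected above come from the
 * KSM_ATTR()/KSM_ATTR_RO() helpers defined earlier in this file, which
 * expand roughly as:
 *
 *	static struct kobj_attribute sleep_millisecs_attr =
 *		__ATTR(sleep_millisecs, 0644,
 *		       sleep_millisecs_show, sleep_millisecs_store);
 *
 * i.e. one kobj_attribute tying a sysfs file name to its show()/store()
 * pair; the NULL entry terminates the array for sysfs_create_group().
 */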

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
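/*
 * Because the group is named "ksm" and registered on mm_kobj in
 * ksm_init() below, each attribute surfaces as a file under
 * /sys/kernel/mm/ksm/, e.g.:
 *
 *	$ ls /sys/kernel/mm/ksm
 *	full_scans  pages_shared  pages_sharing  pages_to_scan  run  ...
 */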
#endif /* CONFIG_SYSFS */

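/*
 * ksm_init() - one-time setup for the KSM subsystem: caches the checksum
 * of the zero page, creates the slab caches, starts the ksmd scanner
 * thread, and (with CONFIG_SYSFS) publishes the tunables above under
 * /sys/kernel/mm/ksm.
 */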
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);
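/*
 * subsys_initcall() runs ksm_init() during the subsystem phase of boot,
 * late enough that the slab allocator and (with CONFIG_SYSFS) mm_kobj
 * already exist, and well before userspace can poke the sysfs knobs.
 */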