Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/*
	 * WARNING: hdr should be the first member of this structure
	 * so that it can be typecast into mmu_notifier_subscriptions_hdr.
	 * This is required to avoid KMI CRC breakage.
	 */
	struct mmu_notifier_subscriptions_hdr hdr;
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *    seq |= 1  # Begin writing
 *    seq++     # Release the writing state
 *    seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case of
 * no mmu_interval_notifier monitoring the VA.
 */
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}
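
/*
 * For example, starting from the idle value of 2 set by
 * init_subscriptions(): the first invalidate_start that finds an
 * overlapping interval does seq |= 1 (seq == 3, a writer exists);
 * further overlapping invalidations leave seq at 3; the final
 * mn_itree_inv_end() does seq++ (seq == 4, even and idle again).
 */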

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens, then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context.  The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *   mn_tree_invalidate_start():          mmu_interval_read_begin():
	 *                                         spin_lock
	 *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                          seq == subs->invalidate_seq
	 *                                         spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *     op->invalidate_range():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         interval_sub->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                          [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                        user_lock
	 *                                         mmu_interval_read_retry():
	 *                                          interval_sub->invalidate_seq != seq
	 *                                        user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point; avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
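
/*
 * A minimal usage sketch of the collision-retry scheme above. The
 * 'driver' structure and its mutex are hypothetical stand-ins; the mutex
 * plays the role of the 'user_lock' that serializes teardown and setup:
 *
 *	again:
 *		seq = mmu_interval_read_begin(&driver->interval_sub);
 *		... read the PTEs / fault the pages, may sleep ...
 *		mutex_lock(&driver->lock);
 *		if (mmu_interval_read_retry(&driver->interval_sub, seq)) {
 *			mutex_unlock(&driver->lock);
 *			goto again;
 *		}
 *		... establish the shadow PTEs ...
 *		mutex_unlock(&driver->lock);
 */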

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				/*
				 * We call all the notifiers on any EAGAIN;
				 * there is no way for a notifier to know if
				 * its start method failed, so a start that
				 * does EAGAIN can't also do end.
				 */
				WARN_ON(ops->invalidate_range_end);
				ret = _ret;
			}
		}
	}

	if (ret) {
		/*
		 * Must be non-blocking to get here.  If there are multiple
		 * notifiers and one or more failed start, any that succeeded
		 * start are expecting their end to be called.  Do so now.
		 */
		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
					 hlist, srcu_read_lock_held(&srcu)) {
			if (!subscription->ops->invalidate_range_end)
				continue;

			subscription->ops->invalidate_range_end(subscription,
								range);
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}
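
/*
 * For reference, a core-mm caller brackets an invalidation broadly like
 * this (a generic sketch; real call sites in mm/ vary in event and flags):
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear the PTEs and flush the TLB ...
 *	mmu_notifier_invalidate_range_end(&range);
 */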

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when it already has invalidate_range. Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT

static inline void mmu_notifier_write_lock(struct mm_struct *mm)
{
	percpu_down_write(
		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
}

static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
{
	percpu_up_write(
		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
}

#else /* CONFIG_SPECULATIVE_PAGE_FAULT */

static inline void mmu_notifier_write_lock(struct mm_struct *mm) {}
static inline void mmu_notifier_write_unlock(struct mm_struct *mm) {}

#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */

static void init_subscriptions(struct mmu_notifier_subscriptions *subscriptions)
{
	INIT_HLIST_HEAD(&subscriptions->list);
	spin_lock_init(&subscriptions->lock);
	subscriptions->invalidate_seq = 2;
	subscriptions->itree = RB_ROOT_CACHED;
	init_waitqueue_head(&subscriptions->wq);
	INIT_HLIST_HEAD(&subscriptions->deferred_list);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
 * write mode. A NULL subscription signals the notifier is being registered
 * for itree mode.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) int __mmu_notifier_register(struct mmu_notifier *subscription,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			    struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	struct mmu_notifier_subscriptions *subscriptions = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	mmap_assert_write_locked(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		fs_reclaim_acquire(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		fs_reclaim_release(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	if (!mm->notifier_subscriptions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		 * kmalloc cannot be called under mm_take_all_locks(), but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		 * know that mm->notifier_subscriptions can't change while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		 * hold the write side of the mmap_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		subscriptions = kzalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		if (!subscriptions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		init_subscriptions(subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	mmu_notifier_write_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	ret = mm_take_all_locks(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		mmu_notifier_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	 * Serialize the update against mmu_notifier_unregister. A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	 * side note: mmu_notifier_release can't run concurrently with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	 * us because we hold the mm_users pin (either implicitly as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	 * current->mm or explicitly with get_task_mm() or similar).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	 * We can't race against any other mmu notifier method either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	 * thanks to mm_take_all_locks().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	 * release semantics on the initialization of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	 * mmu_notifier_subscriptions's contents are provided for unlocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	 * readers.  acquire can only be used while holding the mmgrab or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	 * mmget, and is safe because once created the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 * As above, users holding the mmap_lock or one of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	 * mm_take_all_locks() do not need to use acquire semantics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	if (subscriptions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		smp_store_release(&mm->notifier_subscriptions, subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	mm->notifier_subscriptions->hdr.valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	if (subscription) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		mmgrab(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		subscription->mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		subscription->users = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		spin_lock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		hlist_add_head_rcu(&subscription->hlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 				   &mm->notifier_subscriptions->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		spin_unlock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		mm->notifier_subscriptions->has_itree = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	mm_drop_all_locks(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	mmu_notifier_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) out_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	kfree(subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) EXPORT_SYMBOL_GPL(__mmu_notifier_register);
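/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * smp_store_release() above pairs with smp_load_acquire() on the unlocked
 * read side. mm_has_interval_notifiers() below is a hypothetical reader
 * showing the pairing; real readers such as mmu_interval_notifier_insert()
 * do the same.
 */
static bool mm_has_interval_notifiers(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subs;

	/* Acquire pairs with the release store in __mmu_notifier_register() */
	subs = smp_load_acquire(&mm->notifier_subscriptions);
	return subs && subs->has_itree;
}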
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  * mmu_notifier_register - Register a notifier on a mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  * @subscription: The notifier to attach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  * @mm: The mm to attach the notifier to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * Must not hold mmap_lock nor any other VM related lock when calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  * this registration function. Must also ensure mm_users can't go down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * to zero while this runs to avoid races with mmu_notifier_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * so mm has to be current->mm or the mm should be pinned safely such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * as with get_task_mm(). If the mm is not current->mm, the mm_users
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * pin should be released by calling mmput after mmu_notifier_register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  * unregister the notifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  * While the caller holds a mmu_notifier reference (a "get"), the subscription->mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  * pointer remains valid and can be converted to an active mm pointer via mmget_not_zero().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) int mmu_notifier_register(struct mmu_notifier *subscription,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			  struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	mmap_write_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	ret = __mmu_notifier_register(subscription, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) EXPORT_SYMBOL_GPL(mmu_notifier_register);
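/*
 * Editor's note: a minimal registration sketch, not part of the original
 * file. "struct my_ctx", my_release() and my_attach() are hypothetical; the
 * get_task_mm()/mmput() pair keeps mm_users elevated across registration, as
 * the comment above requires (linux/sched/mm.h is assumed for get_task_mm()).
 */
struct my_ctx {
	struct mmu_notifier mn;
};

static void my_release(struct mmu_notifier *subscription, struct mm_struct *mm)
{
	/* Last chance to tear down secondary TLB state for this mm. */
}

static const struct mmu_notifier_ops my_mn_ops = {
	.release = my_release,
};

static int my_attach(struct my_ctx *ctx, struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);	/* pin mm_users */
	int ret;

	if (!mm)
		return -ESRCH;
	ctx->mn.ops = &my_mn_ops;
	ret = mmu_notifier_register(&ctx->mn, mm);
	mmput(mm);	/* registration holds its own mm_count pin */
	return ret;
}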
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) static struct mmu_notifier *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	struct mmu_notifier *subscription;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	spin_lock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	hlist_for_each_entry_rcu(subscription,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 				 &mm->notifier_subscriptions->list, hlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		if (subscription->ops != ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		if (likely(subscription->users != UINT_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			subscription->users++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			subscription = ERR_PTR(-EOVERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		spin_unlock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		return subscription;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	spin_unlock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  * mmu_notifier_get_locked - Return the single struct mmu_notifier for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  *                           the mm & ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * @ops: The operations struct being subscribed with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  * @mm: The mm to attach notifiers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808)  * This function either allocates a new mmu_notifier via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * ops->alloc_notifier(), or returns an already existing notifier on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  * list. The value of the ops pointer is used to determine when two notifiers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * are the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * Each call to mmu_notifier_get() must be paired with a call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * While the caller has a mmu_notifier get the mm pointer will remain valid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  * and can be converted to an active mm pointer via mmget_not_zero().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 					     struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct mmu_notifier *subscription;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	mmap_assert_write_locked(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (mm->notifier_subscriptions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		subscription = find_get_mmu_notifier(mm, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		if (subscription)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			return subscription;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	subscription = ops->alloc_notifier(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (IS_ERR(subscription))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		return subscription;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	subscription->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	ret = __mmu_notifier_register(subscription, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	return subscription;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	subscription->ops->free_notifier(subscription);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
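/*
 * Editor's note: a sketch of the get side of the get/put flow, not part of
 * the original file. The ops must supply alloc_notifier()/free_notifier();
 * the mmu_notifier_get() wrapper (which takes the mmap_lock and calls the
 * function above) then returns a new or existing subscription for the
 * (ops, mm) pair. All my_* names are hypothetical.
 */
struct my_notifier {
	struct mmu_notifier mn;
	/* driver-private state */
};

static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
{
	struct my_notifier *p = kzalloc(sizeof(*p), GFP_KERNEL);

	return p ? &p->mn : ERR_PTR(-ENOMEM);
}

static void my_free_notifier(struct mmu_notifier *subscription)
{
	kfree(container_of(subscription, struct my_notifier, mn));
}

static const struct mmu_notifier_ops my_get_ops = {
	.alloc_notifier = my_alloc_notifier,
	.free_notifier = my_free_notifier,
};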
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) /* this is called after the last mmu_notifier_unregister() returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	kfree(mm->notifier_subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	mm->notifier_subscriptions = LIST_POISON1; /* debug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  * This releases the mm_count pin automatically and frees the mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * structure if it was the last user of it. It serializes against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * running mmu notifiers with SRCU and against mmu_notifier_unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * with the unregister lock + SRCU. All sptes must be dropped before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * calling mmu_notifier_unregister. ->release or any other notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * method may be invoked concurrently with mmu_notifier_unregister,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * and only after mmu_notifier_unregister returned we're guaranteed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  * that ->release or any other method can't run anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) void mmu_notifier_unregister(struct mmu_notifier *subscription,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			     struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (!hlist_unhashed(&subscription->hlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		 * SRCU here will force exit_mmap to wait for ->release to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		 * finish before freeing the pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		id = srcu_read_lock(&srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		 * exit_mmap will block in mmu_notifier_release to guarantee
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		 * that ->release is called before freeing the pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		if (subscription->ops->release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			subscription->ops->release(subscription, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		srcu_read_unlock(&srcu, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		spin_lock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 * Cannot use hlist_del_rcu() since __mmu_notifier_release()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 * can delete it before we hold the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		hlist_del_init_rcu(&subscription->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		spin_unlock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	 * Wait for any running method to finish, of course including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	 * ->release if it was run by mmu_notifier_release instead of us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	synchronize_srcu(&srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
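/*
 * Editor's note: a teardown sketch, not part of the original file. All
 * device sptes must be dropped before the call; once it returns no callback,
 * including ->release, can still be running, so driver state may be freed.
 * my_detach() and struct my_ctx are hypothetical (see the earlier sketch).
 */
static void my_detach(struct my_ctx *ctx, struct mm_struct *mm)
{
	/* all secondary TLB entries for this mm are already gone here */
	mmu_notifier_unregister(&ctx->mn, mm);
	/* no notifier method can run past this point */
	kfree(ctx);
}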
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) static void mmu_notifier_free_rcu(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct mmu_notifier *subscription =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		container_of(rcu, struct mmu_notifier, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct mm_struct *mm = subscription->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	subscription->ops->free_notifier(subscription);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	/* Pairs with the get in __mmu_notifier_register() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  * mmu_notifier_put - Release the reference on the notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  * @subscription: The notifier to act on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * This function must be paired with each mmu_notifier_get(), it releases the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * reference obtained by the get. If this is the last reference then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * process to free the notifier will be run asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * when the mm_struct is destroyed. Instead free_notifier is always called to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * release any resources held by the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * As ops->release is not guaranteed to be called, the user must ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * all sptes are dropped, and no new sptes can be established before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * mmu_notifier_put() is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * This function can be called from the ops->release callback, however the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * caller must still ensure it is called pairwise with mmu_notifier_get().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * Modules calling this function must call mmu_notifier_synchronize() in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * their __exit functions to ensure the async work is completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) void mmu_notifier_put(struct mmu_notifier *subscription)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct mm_struct *mm = subscription->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	spin_lock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (WARN_ON(!subscription->users) || --subscription->users)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	hlist_del_init_rcu(&subscription->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	spin_unlock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	spin_unlock(&mm->notifier_subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) EXPORT_SYMBOL_GPL(mmu_notifier_put);
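/*
 * Editor's note: how a put pairs with a get, as a sketch (not part of the
 * original file). my_get_ops is the hypothetical ops from the earlier
 * sketch; the final put defers free_notifier() and the mmdrop to SRCU via
 * mmu_notifier_free_rcu() above.
 */
static int my_use_once(void)
{
	struct mmu_notifier *subscription;

	subscription = mmu_notifier_get(&my_get_ops, current->mm);
	if (IS_ERR(subscription))
		return PTR_ERR(subscription);
	/* ... establish and later drop all sptes ... */
	mmu_notifier_put(subscription);	/* pairs with the get above */
	return 0;
}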
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) static int __mmu_interval_notifier_insert(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	unsigned long length, const struct mmu_interval_notifier_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	interval_sub->mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	interval_sub->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	interval_sub->interval_tree.start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * Note that the representation of the intervals in the interval tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * considers the ending point as contained in the interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (length == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	    check_add_overflow(start, length - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			       &interval_sub->interval_tree.last))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	/* Must call with a mmget() held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (WARN_ON(atomic_read(&mm->mm_users) <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/* pairs with mmdrop in mmu_interval_notifier_remove() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	mmgrab(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	 * If an invalidate_range_start/end critical section is in progress in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	 * parallel we don't know which VA ranges are affected, so we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	 * assume this new range is included.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	 * If the itree is invalidating then we are not allowed to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	 * it. Retrying until invalidation is done is tricky due to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	 * possibility for live lock, instead defer the add to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	 * mn_itree_inv_end() so this algorithm is deterministic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 * In all cases the value for the interval_sub->invalidate_seq should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	 * odd, see mmu_interval_read_begin()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	spin_lock(&subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (subscriptions->active_invalidate_ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		if (mn_itree_is_invalidating(subscriptions))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			hlist_add_head(&interval_sub->deferred_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 				       &subscriptions->deferred_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			subscriptions->invalidate_seq |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			interval_tree_insert(&interval_sub->interval_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 					     &subscriptions->itree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		WARN_ON(mn_itree_is_invalidating(subscriptions));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		 * The starting seq for a subscription not under invalidation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		 * should be odd and not equal to the current invalidate_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		 * invalidate_seq should not 'wrap' to the new seq any time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		 * soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		interval_sub->invalidate_seq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			subscriptions->invalidate_seq - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		interval_tree_insert(&interval_sub->interval_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				     &subscriptions->itree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	spin_unlock(&subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
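/*
 * Editor's note: a worked example of the inclusive-end check above. With
 * start = 0x1000 and length = 0x1000 the stored closed interval is
 * [0x1000, 0x1fff], i.e. last = start + length - 1. A length of zero, or a
 * start + length - 1 that overflows unsigned long, fails check_add_overflow()
 * and is rejected with -EOVERFLOW.
 */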
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  * mmu_interval_notifier_insert - Insert an interval notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * @interval_sub: Interval subscription to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * @start: Starting virtual address to monitor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * @length: Length of the range to monitor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * @mm: mm_struct to attach to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * @ops: Interval notifier operations to be called on matching events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  * This function subscribes the interval notifier for notifications from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  * mm.  Upon return the ops related to mmu_interval_notifier will be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * whenever an event that intersects with the given range occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * Upon return the interval subscription may not be present in the interval tree yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * The caller must use the normal interval notifier read flow via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * mmu_interval_read_begin() to establish SPTEs for this range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				 struct mm_struct *mm, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				 unsigned long length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 				 const struct mmu_interval_notifier_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	struct mmu_notifier_subscriptions *subscriptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	might_lock(&mm->mmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (!subscriptions || !subscriptions->has_itree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		ret = mmu_notifier_register(NULL, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		subscriptions = mm->notifier_subscriptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 					      start, length, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
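/*
 * Editor's note: a sketch of the read flow referenced above, not part of
 * the original file. The my_* names and the driver mutex are hypothetical
 * (linux/mutex.h assumed); the mmu_interval_* and
 * mmu_notifier_range_blockable() calls are the real API. my_itree_ops would
 * be the ops passed to mmu_interval_notifier_insert().
 */
struct my_range {
	struct mmu_interval_notifier notifier;
	struct mutex lock;	/* serializes device page-table updates */
};

static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_range *mr =
		container_of(interval_sub, struct my_range, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;
	mutex_lock(&mr->lock);
	mmu_interval_set_seq(interval_sub, cur_seq);
	/* ... tear down device PTEs covering range->start..range->end ... */
	mutex_unlock(&mr->lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_itree_ops = {
	.invalidate = my_invalidate,
};

static int my_map(struct my_range *mr)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&mr->notifier);
	/* ... fault in or look up the pages; may sleep ... */
	mutex_lock(&mr->lock);
	if (mmu_interval_read_retry(&mr->notifier, seq)) {
		mutex_unlock(&mr->lock);
		goto again;	/* raced with an invalidation */
	}
	/* ... establish device PTEs under the lock ... */
	mutex_unlock(&mr->lock);
	return 0;
}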
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int mmu_interval_notifier_insert_locked(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	unsigned long start, unsigned long length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	const struct mmu_interval_notifier_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct mmu_notifier_subscriptions *subscriptions =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		mm->notifier_subscriptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	mmap_assert_write_locked(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (!subscriptions || !subscriptions->has_itree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		ret = __mmu_notifier_register(NULL, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		subscriptions = mm->notifier_subscriptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 					      start, length, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  * mmu_interval_notifier_remove - Remove an interval notifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  * @interval_sub: Interval subscription to unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  * This function must be paired with mmu_interval_notifier_insert(). It cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)  * be called from any ops callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  * Once this returns, ops callbacks are no longer running on other CPUs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * will not be called in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	struct mm_struct *mm = interval_sub->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct mmu_notifier_subscriptions *subscriptions =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		mm->notifier_subscriptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	unsigned long seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	spin_lock(&subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	if (mn_itree_is_invalidating(subscriptions)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		 * remove is being called after insert placed this subscription
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		 * on the deferred list, but before the deferred list was processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			hlist_del(&interval_sub->deferred_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			hlist_add_head(&interval_sub->deferred_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				       &subscriptions->deferred_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			seq = subscriptions->invalidate_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		interval_tree_remove(&interval_sub->interval_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 				     &subscriptions->itree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	spin_unlock(&subscriptions->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	 * Because this may sleep waiting for an invalidation in progress, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	 * caller must not hold any locks that invalidation callbacks take.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		wait_event(subscriptions->wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			   READ_ONCE(subscriptions->invalidate_seq) != seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	/* pairs with mmgrab in mmu_interval_notifier_insert() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
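/*
 * Editor's note: the matching teardown for the earlier read-flow sketch
 * (not part of the original file); it must run in sleepable process context
 * and never from an ops callback, per the kernel-doc above.
 */
static void my_range_destroy(struct my_range *mr)
{
	mmu_interval_notifier_remove(&mr->notifier);
	/* my_invalidate() can no longer be running or be called */
}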
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * This function ensures that all outstanding async SRCU work from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * associated with an unused mmu_notifier will no longer be called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * Before calling this, the caller must ensure that all of its mmu_notifiers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * have been fully released via mmu_notifier_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * Modules using the mmu_notifier_put() API should call this in their __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * function to avoid module unloading races.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) void mmu_notifier_synchronize(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	synchronize_srcu(&srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
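/*
 * Editor's note: the typical module-exit pairing for users of the get/put
 * flow, as a sketch (my_exit is hypothetical; linux/module.h assumed).
 */
static void __exit my_exit(void)
{
	/*
	 * Flush the SRCU-deferred free_notifier() work queued by
	 * mmu_notifier_put() before the module text can be unloaded.
	 */
	mmu_notifier_synchronize();
}
module_exit(my_exit);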
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	/* Return true if the vma still has the read flag set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	return range->vma->vm_flags & VM_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
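/*
 * Editor's note: a sketch (not part of the original file) of how a driver's
 * invalidate_range_start() callback might use the helper above to downgrade,
 * rather than drop, its mappings when a MMU_NOTIFY_PROTECTION_VMA event only
 * removes write permission. my_wp_mappings()/my_zap_mappings() are
 * hypothetical helpers assumed to be provided elsewhere.
 */
static void my_wp_mappings(unsigned long start, unsigned long end);
static void my_zap_mappings(unsigned long start, unsigned long end);

static int my_invalidate_range_start(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_update_to_read_only(range))
		my_wp_mappings(range->start, range->end);  /* keep, read-only */
	else
		my_zap_mappings(range->start, range->end); /* drop entirely */
	return 0;
}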
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	struct mmu_notifier_subscriptions *subscriptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	struct percpu_rw_semaphore_atomic *sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	subscriptions = kzalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (!subscriptions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	sem = kzalloc(sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (!sem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		kfree(subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	percpu_init_rwsem(&sem->rw_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	init_subscriptions(subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	subscriptions->has_itree = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	subscriptions->hdr.valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	subscriptions->hdr.mmu_notifier_lock = sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	mm->notifier_subscriptions = subscriptions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	percpu_rwsem_async_destroy(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			mm->notifier_subscriptions->hdr.mmu_notifier_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	kfree(mm->notifier_subscriptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	mm->notifier_subscriptions = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */