// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff;
}

static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff + vma_pages(v) - 1;
}
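
/*
 * Note that the tree works on closed intervals: vma_last_pgoff() returns
 * the offset of the last page covered by the VMA, not the one past it.
 * An illustrative example (the values are hypothetical, not from this
 * file): a VMA spanning four pages at vm_pgoff == 10 covers page offsets
 * [10, 13], since vma_pages() is (vm_end - vm_start) >> PAGE_SHIFT.
 */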

INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
		     unsigned long, shared.rb_subtree_last,
		     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
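
/*
 * The INTERVAL_TREE_DEFINE() instantiation above generates
 * vma_interval_tree_insert(), vma_interval_tree_remove(),
 * vma_interval_tree_iter_first() and vma_interval_tree_iter_next(),
 * plus the vma_interval_tree_augment callbacks that keep
 * shared.rb_subtree_last up to date. The seemingly stray double comma
 * is intentional: the empty argument is the ISSTATIC parameter, and
 * leaving it blank makes the generated functions extern so other mm/
 * code can call them. A minimal lookup sketch, assuming a suitably
 * locked mapping (pgoff_start/pgoff_end are hypothetical variables):
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 *				  pgoff_start, pgoff_end) {
 *		each vma seen here overlaps [pgoff_start, pgoff_end]
 *	}
 */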

/*
 * Insert node immediately after prev in the interval tree.
 *
 * This is only correct when node and prev start at the same page offset
 * (asserted below); node then sorts immediately after prev, so it can be
 * linked at prev's in-order successor position instead of descending
 * from the root. Along the way, node's last page offset is pushed into
 * the rb_subtree_last of every node passed, keeping the augmented data
 * consistent before rebalancing.
 */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root)
{
	struct rb_node **link;
	struct vm_area_struct *parent;
	unsigned long last = vma_last_pgoff(node);

	VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

	if (!prev->shared.rb.rb_right) {
		/* prev has no right child: node becomes that child. */
		parent = prev;
		link = &prev->shared.rb.rb_right;
	} else {
		/*
		 * Otherwise the successor slot is the leftmost position
		 * in prev's right subtree; update rb_subtree_last on the
		 * way down.
		 */
		parent = rb_entry(prev->shared.rb.rb_right,
				  struct vm_area_struct, shared.rb);
		if (parent->shared.rb_subtree_last < last)
			parent->shared.rb_subtree_last = last;
		while (parent->shared.rb.rb_left) {
			parent = rb_entry(parent->shared.rb.rb_left,
					  struct vm_area_struct, shared.rb);
			if (parent->shared.rb_subtree_last < last)
				parent->shared.rb_subtree_last = last;
		}
		link = &parent->shared.rb.rb_left;
	}

	node->shared.rb_subtree_last = last;
	rb_link_node(&node->shared.rb, &parent->shared.rb, link);
	rb_insert_augmented(&node->shared.rb, &root->rb_root,
			    &vma_interval_tree_augment);
}
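
/*
 * A sketch of how this is typically used (simplified and hedged: the
 * names follow the fork-time mapping duplication path, and locking is
 * elided). A copied VMA starts at the same pgoff as its original, so it
 * can be linked right after it without a full tree descent:
 *
 *	with the mapping write-locked, and tmp cloned from mpnt:
 *
 *	vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
 */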

static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
	return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
	return vma_last_pgoff(avc->vma);
}

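/*
 * Instantiate the same interval tree machinery for anon_vma_chain
 * nodes, keyed by the page offsets of the VMA each chain entry links
 * to. Passing "static inline" as the ISSTATIC argument keeps the
 * generated __anon_vma_interval_tree_*() functions local to this file;
 * the out-of-line wrappers below are the only entry points other code
 * sees, which lets them add the CONFIG_DEBUG_VM_RB bookkeeping in one
 * place.
 */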
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,
		     static inline, __anon_vma_interval_tree)

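/*
 * Unlike the file-backed tree, inserts are wrapped so that, under
 * CONFIG_DEBUG_VM_RB, each node records the interval it was inserted
 * with. anon_vma_interval_tree_verify() below can then catch VMAs whose
 * offsets were changed while still linked into a tree, which would
 * silently break the interval tree ordering.
 */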
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
	node->cached_vma_start = avc_start_pgoff(node);
	node->cached_vma_last = avc_last_pgoff(node);
#endif
	__anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
	__anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
				 unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_next(node, first, last);
}

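/*
 * Compares a node's current interval against the one cached at insert
 * time; a hedged note: this is the sort of check invoked from VM
 * debugging code (validate_mm()-style walks) when CONFIG_DEBUG_VM_RB
 * is enabled.
 */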
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif
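
/*
 * Typical consumer sketch (hedged: this mirrors how rmap walks an
 * anon_vma; pgoff_start/pgoff_end are hypothetical variables naming
 * the page range of interest):
 *
 *	struct anon_vma_chain *avc;
 *
 *	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 *				       pgoff_start, pgoff_end) {
 *		struct vm_area_struct *vma = avc->vma;
 *		here vma maps some page in [pgoff_start, pgoff_end]
 *	}
 */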