Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "prmtv-common.h"

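/*
 * For KUnit tests (CONFIG_DAMON_VADDR_KUNIT_TEST), the minimum region size
 * is lowered to a single byte so that the tests can construct tiny
 * artificial regions without caring about the real DAMON_MIN_REGION
 * granularity.
 */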
#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be a pointer to the relevant 'struct pid', with its
 * reference count held.  The caller must put the returned task, unless it is
 * NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task((struct pid *)t->id, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
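/*
 * For example, splitting a region covering [0, 10 * DAMON_MIN_REGION) into
 * three pieces produces [0, 3), [3, 6) and [6, 10) in DAMON_MIN_REGION
 * units: each piece is rounded down to the DAMON_MIN_REGION granularity and
 * the last piece is extended to absorb the rounding remainder.
 */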
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

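/* Return the size of the given address range */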
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments on '__damon_va_init_regions()' below for why this is
 * necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because usually only small portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful.  On the other hand, because we can tolerate small noise, tracking
 * every mapping is not strictly required and could even incur a high overhead
 * if the mappings change frequently or the number of mappings is high.  The
 * adaptive regions adjustment mechanism further helps to deal with the noise
 * by simply identifying unmapped areas as regions that receive no access.
 * Moreover, applying the real mappings, which would have many unmapped areas
 * inside, would make the adaptive mechanism quite complex.  That said,
 * excessively large unmapped areas inside the monitoring target should still
 * be removed so that the adaptive mechanism does not waste time on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that together cover every mapped area of the address space.  The two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address
 * space.  Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of processes looks like below, the gap between the
 * heap and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in a usual address space,
 * excluding these two biggest unmapped regions is a sufficient trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

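/*
 * Page table walk callback that marks the entry mapping 'addr' as old
 * ("mkold"): the PMD itself for a huge mapping, the PTE otherwise.  Clearing
 * the accessed state lets the next access to the page be detected.
 */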
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
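/*
 * Clear the young (accessed) state of the hugetlb entry mapping 'addr' and
 * mark the backing page idle, remembering a prior reference via the page's
 * young flag so that the idle-page tracking state stays consistent.
 */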
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	if (!page)
		return;

	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		huge_ptep_set_access_flags(vma, addr, pte, entry,
					   vma->vm_flags & VM_WRITE);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

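/* hugetlb counterpart of damon_mkold_pmd_entry() */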
static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};

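/*
 * Walk the page table of 'mm' for the single page containing 'addr' and
 * mark the mapping entry as old, so that the next access becomes noticeable.
 */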
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

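/*
 * Prepare an access check of a region: pick a random sampling address within
 * the region and clear the accessed state of the page mapping it.  Whether
 * that single page gets re-accessed before the next check stands in for the
 * whole region; this region-based sampling is what keeps DAMON's overhead
 * bounded.
 */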
static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

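/*
 * Result of a single 'young' page table walk
 *
 * page_sz	where to store the size of the page mapping the sampled
 *		address, when an access is found
 * young	set to true if the page was accessed since it was marked old
 */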
struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

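/*
 * Page table walk callback that checks whether the entry mapping 'addr' (the
 * PMD for a transparent huge page, the PTE otherwise) has been accessed,
 * consulting the young bit, the idle-page flag and MMU notifiers.
 */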
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
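/* hugetlb counterpart of damon_young_pmd_entry() */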
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	if (!page)
		goto out;

	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};

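/*
 * Check whether the page containing 'addr' in 'mm' has been accessed since
 * it was last marked old by damon_va_mkold().  When an access is found,
 * '*page_sz' is set to the size of the page mapping the address.
 */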
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct damon_ctx *ctx,
			       struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

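/*
 * Check the access status of every region of every target and return the
 * highest 'nr_accesses' that was observed.
 */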
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			__damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

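/* A target is valid as long as the task it refers to is still alive */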
bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

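/*
 * Apply the given madvise() 'behavior' to the region of the target and
 * return the number of bytes it was applied to (0 on failure).  Without
 * CONFIG_ADVISE_SYSCALLS, do_madvise() is not built, so the stub applies
 * nothing.
 */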
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

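/*
 * Apply a DAMOS scheme action to a region by translating the action into the
 * corresponding madvise() behavior.  DAMOS_STAT only collects statistics, so
 * nothing is applied for it.
 */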
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

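/*
 * Return the priority score of a region for the given scheme.  Only
 * DAMOS_PAGEOUT has a dedicated scoring function (damon_pageout_score());
 * every other action gets DAMOS_MAX_SCORE, i.e. no prioritization.
 */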
static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

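/*
 * Plug the virtual address space monitoring primitives into the given DAMON
 * context, so that the context monitors the virtual address spaces of its
 * target tasks.
 */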
void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
	ctx->primitive.get_scheme_score = damon_va_scheme_score;
}
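
/*
 * Minimal usage sketch (hypothetical caller; the exact DAMON core API names
 * and signatures depend on the kernel version, so treat this only as an
 * illustration of where damon_va_set_primitives() fits in):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	damon_va_set_primitives(ctx);
 *	// configure monitoring attributes and target tasks on 'ctx', then:
 *	damon_start(&ctx, 1);
 */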

#include "vaddr-test.h"