// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "prmtv-common.h"

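/*
 * rmap_walk() callback for damon_pa_mkold(): walks every mapping of @page in
 * @vma and clears the accessed information at the PTE or PMD level.
 */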
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

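/*
 * Clear the accessed information of the page for the physical address @paddr
 * by walking its reverse mappings.  Unmapped pages are only marked idle.
 * Non-anonymous and KSM pages need the page lock for rmap_walk(), so they are
 * silently skipped if the lock cannot be taken without blocking.
 */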
static void damon_pa_mkold(unsigned long paddr)
{
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;

	if (!page_mapped(page) || !page_rmapping(page)) {
		set_page_idle(page);
		goto out;
	}

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
		goto out;

	rmap_walk(page, &rwc);

	if (need_lock)
		unlock_page(page);

out:
	put_page(page);
}

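/* Pick a random sampling address in the region and clear its accessed bit. */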
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
		struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

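/*
 * DAMON primitive for preparing the access checks: resets the accessed
 * information of one randomly chosen sampling address in every region.
 */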
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}

struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

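/*
 * rmap_walk() callback for damon_pa_young(): checks whether any mapping of
 * @page has been accessed since the last damon_pa_mkold() and saves the
 * result in the struct damon_pa_access_chk_result pointed to by @arg.
 * Returning false stops the walk as soon as an access is found.
 */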
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!page_is_idle(page) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!page_is_idle(page) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}

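/*
 * Check whether the page for the physical address @paddr has been accessed
 * since the last damon_pa_mkold() on it, based on its page table accessed
 * bits, the idle flag, and MMU notifiers.  @page_sz is used to report the
 * size of the mapping the result is based on.
 */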
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;

	if (!page_mapped(page) || !page_rmapping(page)) {
		result.accessed = !page_is_idle(page);
		put_page(page);
		goto out;
	}

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page)) {
		put_page(page);
		return false;
	}

	rmap_walk(page, &rwc);

	if (need_lock)
		unlock_page(page);
	put_page(page);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

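/*
 * Check the access to the sampling address of @r and update r->nr_accesses.
 * The result of the last check is cached in static variables so that regions
 * whose sampling addresses fall in the same (possibly huge) page do not
 * trigger redundant rmap walks.
 */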
static void __damon_pa_check_access(struct damon_ctx *ctx,
		struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

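/*
 * DAMON primitive for checking the accesses: updates nr_accesses of every
 * region and returns the maximum nr_accesses among all regions.
 */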
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(ctx, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

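/*
 * The physical address space is a valid monitoring target regardless of any
 * process's lifetime, so this always returns true.
 */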
bool damon_pa_target_valid(void *t)
{
	return true;
}

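/*
 * DAMON primitive for applying a scheme action to a region.  Only
 * DAMOS_PAGEOUT is supported: evictable pages in the region get their
 * referenced/young bits cleared, are isolated from the LRU, and are handed to
 * reclaim_pages().  Returns the number of bytes that were actually reclaimed.
 */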
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	if (scheme->action != DAMOS_PAGEOUT)
		return 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

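/*
 * DAMON primitive for scheme target region prioritization.  Pageout targets
 * are scored with damon_pageout_score(); any other action gets the maximum
 * score.
 */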
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

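/*
 * Register the physical address space monitoring primitives to @ctx.  The
 * init, update, reset_aggregated, and cleanup callbacks are not needed for
 * this address space, so they are left NULL.
 */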
void damon_pa_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = NULL;
	ctx->primitive.update = NULL;
	ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
	ctx->primitive.check_accesses = damon_pa_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_pa_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_pa_apply_scheme;
	ctx->primitive.get_scheme_score = damon_pa_scheme_score;
}