// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

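/*
 * cmma_flag: 0 = CMMA disabled, 1 = ESSA is available,
 * 2 = ESSA and the no-dat page states are available (facility 147).
 */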
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);

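/*
 * Issue an ESSA_GET_STATE operation to find out whether the ESSA
 * instruction is available. rc is preset to -EOPNOTSUPP; if ESSA is
 * not available the resulting operation exception skips the "la"
 * instruction via the exception table entry, so -EOPNOTSUPP is
 * returned, otherwise 0.
 */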
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

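/*
 * Check at boot time whether the ESSA instruction is available. If it
 * is not, or if CMMA was disabled with the "cmma=" kernel parameter,
 * clear cmma_flag. If facility 147 is installed the ESSA no-dat page
 * states can be used in addition.
 */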
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

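/* Read the current CMMA state of a page with ESSA_GET_STATE. */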
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

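/*
 * The set_page_* helpers below issue one ESSA operation per 4K page
 * of the given allocation order to change the CMMA page state.
 */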
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

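/*
 * The mark_kernel_* walkers traverse the kernel page tables and set
 * PG_arch_1 on every page that is used as a region, segment or page
 * table, so that cmma_init_nodat() can skip these pages. Region and
 * segment tables occupy four 4K pages each, hence all four pages of
 * such a table are marked.
 */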
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

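/*
 * With the no-dat page states available, mark all kernel pages that
 * are neither used as page tables nor sitting on a free list as
 * stable/no-dat.
 */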
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

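/*
 * Buddy allocator hooks: freed pages are hinted to the hypervisor as
 * unused, allocated pages are set back to a stable state (no-dat if
 * the facility is available).
 */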
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

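/*
 * Explicitly switch a page to the stable state with or without
 * dynamic address translation (dat).
 */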
void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

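/* Test via ESSA_GET_STATE whether a page is currently in a no-dat state. */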
int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

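/*
 * Set all pages on the free lists of all populated zones either back
 * to the stable state (make_stable != 0) or to the unused state,
 * e.g. around hibernation.
 */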
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}