/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
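
/*
 * Illustrative sketch (editor's note): these masks are meant for filtering a
 * caller-supplied gfp mask rather than for allocating with directly. The
 * slab allocators, for example, reject the bits in GFP_SLAB_BUG_MASK up
 * front and keep only the behavioural bits when they call into the page
 * allocator, roughly:
 *
 *	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 *		...warn and strip the invalid bits...
 *
 *	alloc_gfp = flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK);
 */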

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
extern struct vm_area_struct *get_vma(struct mm_struct *mm,
				      unsigned long addr);
extern void put_vma(struct vm_area_struct *vma);

static inline bool vma_has_changed(struct vm_fault *vmf)
{
	int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb);
	unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence);

	/*
	 * Matches both the wmb in write_seqlock_{begin,end}() and
	 * the wmb in vma_rb_erase().
	 */
	smp_rmb();

	return ret || seq != vmf->sequence;
}
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
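
/*
 * Illustrative sketch (editor's note, hypothetical caller): a speculative
 * fault path is expected to look up the VMA without mmap_lock, snapshot the
 * sequence count into vmf->sequence, and fall back to the classic locked
 * path whenever vma_has_changed() observes a concurrent modification:
 *
 *	vma = get_vma(mm, address);
 *	if (!vma)
 *		goto fallback;
 *	vmf.vma = vma;
 *	vmf.sequence = READ_ONCE(vma->vm_sequence.sequence);
 *	...walk the page tables speculatively...
 *	if (vma_has_changed(&vmf)) {
 *		put_vma(vma);
 *		goto fallback;
 *	}
 *	...
 *	put_vma(vma);
 */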

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, struct file_ra_state *,
		unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, mapping, index);
	force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);

/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}
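
/*
 * Illustrative note (editor's sketch): pages come off the buddy freelists
 * with ->_refcount == 0, so the allocation paths hand out the first
 * reference explicitly before the caller ever sees the page, roughly:
 *
 *	prep_new_page()/post_alloc_hook()
 *		-> set_page_refcounted(page);	page_ref_count(page) == 1
 */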

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zones,
	 * memory in zones lower than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones above this index cannot be
	 * used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};
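
/*
 * Illustrative sketch (editor's note, simplified from the allocator fast
 * path): an alloc_context is filled in once and then threaded through the
 * helpers, roughly:
 *
 *	struct alloc_context ac = { };
 *
 *	ac.highest_zoneidx = gfp_zone(gfp_mask);
 *	ac.zonelist = node_zonelist(nid, gfp_mask);
 *	ac.nodemask = nodemask;
 *	ac.migratetype = gfp_migratetype(gfp_mask);
 *	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
 *					ac.highest_zoneidx, ac.nodemask);
 */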

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8 its order-1
 * buddy (B2) is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
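
/*
 * Illustrative sketch (editor's note, mirroring how the free path checks a
 * merge candidate): the returned pfn is only a candidate; callers still have
 * to validate it before treating the page as a buddy, roughly:
 *
 *	buddy_pfn = __find_buddy_pfn(pfn, order);
 *	buddy = page + (buddy_pfn - pfn);
 *
 *	if (!pfn_valid_within(buddy_pfn))
 *		...stop merging...
 *	if (!page_is_buddy(page, buddy, order))
 *		...stop merging...
 */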

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
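
/*
 * Illustrative sketch (editor's note): the intended pattern is to read the
 * order once into a local variable and range-check it before trusting it,
 * e.g.:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long order = buddy_order_unsafe(page);
 *
 *		if (order > 0 && order < MAX_ORDER)
 *			pfn += (1UL << order) - 1;
 *	}
 *
 * which is roughly how the unlocked PFN walkers skip over free blocks.
 */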

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
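
/*
 * Illustrative sketch (editor's note): vm_stat_account()-style bookkeeping
 * picks exactly one bucket per VMA based on these helpers, roughly:
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */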

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

/*
 * Then at what user virtual address will none of the page be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address_end(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page) + compound_nr(page);
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
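
/*
 * Illustrative sketch (editor's note, modelled on the filemap fault paths):
 * callers accumulate the pinned file across several "might sleep" points and
 * report a retry once the lock has been dropped, roughly:
 *
 *	struct file *fpin = NULL;
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	...start readahead / wait for the page lock...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */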

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
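
/*
 * Illustrative sketch (editor's note, following the hugetlb copy/clear
 * loops): 'offset' is the index of the subpage being requested, so a walk
 * over all subpages of a gigantic page looks roughly like:
 *
 *	struct page *p = page;
 *	int i;
 *
 *	for (i = 0; i < pages_per_huge_page;
 *	     i++, p = mem_map_next(p, page, i)) {
 *		...operate on subpage p...
 *	}
 */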

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
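
/*
 * Illustrative sketch (editor's note, hypothetical message): callers pass a
 * severity, a subsystem prefix and a printf-style format, e.g.:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "node %d zone %s\n", nid, zone->name);
 *
 * Messages are emitted only when mminit_loglevel has been raised above the
 * given level; it defaults to 0, so nothing is printed unless the boot
 * parameter bumps it.
 */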

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
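
/*
 * Illustrative sketch (editor's note): the low bits of alloc_flags select
 * which watermark to test, so the zone iteration in the allocator typically
 * does something like:
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *
 *	if (!zone_watermark_fast(zone, order, mark, highest_zoneidx,
 *				 alloc_flags, gfp_mask))
 *		...skip this zone or enter the slow path...
 */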

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

#endif	/* __MM_INTERNAL_H */