// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

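/*
 * Per-page metadata stored in the page_owner area of each page's page_ext:
 *
 * @order:		 order of the allocation the page belongs to
 * @last_migrate_reason: reason for the last migration, -1 if never migrated
 * @gfp_mask:		 GFP flags the page was allocated with
 * @handle:		 stack depot handle of the allocation stack trace
 * @free_handle:	 stack depot handle of the last free stack trace
 * @ts_nsec:		 allocation timestamp, from local_clock()
 * @free_ts_nsec:	 last free timestamp, from local_clock()
 * @pid:		 pid of the allocating task
 */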
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	pid_t pid;
};

bool page_owner_enabled;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

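/*
 * Sentinel stack depot handles used when a real trace can't be saved:
 * dummy_handle when save_stack() detects recursion into the allocator,
 * failure_handle when stack_depot_save() itself fails, and early_handle
 * for pages that were allocated before page_owner was initialized.
 */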
static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

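/* Parse the "page_owner=on" kernel boot parameter. */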
static int __init early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_enabled = true;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}
EXPORT_SYMBOL_GPL(get_page_owner);

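/*
 * Return the stack depot handle of the allocation trace for the page at
 * @pfn, or 0 if page_owner is disabled or @pfn is a tail page of a
 * higher-order allocation.
 */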
depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext, unsigned long pfn)
{
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!page_owner_enabled)
		return 0;

	page_owner = get_page_owner(page_ext);

	/* skip handle for tail pages of higher order allocations */
	if (!IS_ALIGNED(pfn, 1 << page_owner->order))
		return 0;

	handle = READ_ONCE(page_owner->handle);
	return handle;
}
EXPORT_SYMBOL_GPL(get_page_owner_handle);

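/*
 * Return true if @ip appears in the saved trace, which means saving the
 * stack has recursed into the page allocator.
 */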
static inline bool check_recursive_alloc(unsigned long *entries,
					 unsigned int nr_entries,
					 unsigned long ip)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (entries[i] == ip)
			return true;
	}
	return false;
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/*
	 * We need to check for recursion here because our request to
	 * stackdepot could trigger a memory allocation to save the new
	 * entry. That allocation would reach here and call
	 * stack_depot_save() again if we don't catch it. Since stackdepot
	 * would still be short of memory, it would try to allocate
	 * memory again and loop forever.
	 */
	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
		return dummy_handle;

	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}

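/*
 * Called when an order-@order block is freed: record the free stack trace
 * and timestamp for each subpage and clear its "allocated" bit.
 */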
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;
	depot_stack_handle_t handle = 0;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;
	for (i = 0; i < (1 << order); i++) {
		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_owner = get_page_owner(page_ext);
		page_owner->free_handle = handle;
		page_owner->free_ts_nsec = free_ts_nsec;
		page_ext = page_ext_next(page_ext);
	}
}

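/*
 * Record @handle and the allocation details for every subpage of an
 * order-@order block, and mark each one as tracked and allocated.
 */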
static inline void __set_page_owner_handle(struct page *page,
	struct page_ext *page_ext, depot_stack_handle_t handle,
	unsigned int order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;
	int i;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = -1;
		page_owner->pid = current->pid;
		page_owner->ts_nsec = local_clock();
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

		page_ext = page_ext_next(page_ext);
	}
}

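/* Save the current stack trace and record it as the page's allocation owner. */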
noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	depot_stack_handle_t handle;

	if (unlikely(!page_ext))
		return;

	handle = save_stack(gfp_mask);
	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
}

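/*
 * A high-order page was split into @nr base pages: each subpage now
 * stands as an order-0 allocation of its own.
 */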
void __split_page_owner(struct page *page, unsigned int nr)
{
	int i;
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < nr; i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = 0;
		page_ext = page_ext_next(page_ext);
	}
}

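/* Copy the page_owner info from @oldpage to @newpage during page migration. */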
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	struct page_owner *old_page_owner, *new_page_owner;

	if (unlikely(!old_ext || !new_ext))
		return;

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;
	new_page_owner->pid = old_page_owner->pid;
	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
	new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
}

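/*
 * Count, per pageblock migratetype, the pageblocks that contain at least
 * one page allocated with a migratetype different from the block's own
 * ("mixed" pageblocks), and print one line of counts for the zone.
 */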
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

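/*
 * Format a single page_owner record into a kernel buffer and copy it to
 * the user buffer. Returns the number of bytes written, -ENOMEM if the
 * record doesn't fit in @count bytes (capped at PAGE_SIZE), or -EFAULT
 * if the copy to user space fails.
 */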
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	unsigned long *entries;
	unsigned int nr_entries;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns, free_ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->ts_nsec, page_owner->free_ts_nsec);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	nr_entries = stack_depot_fetch(handle, &entries);
	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

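/* Dump the page_owner info for @page to the kernel log. */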
void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	unsigned long *entries;
	unsigned int nr_entries;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner allocation stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		pr_alert("page last free stack trace:\n");
		stack_trace_print(entries, nr_entries, 0);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

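/*
 * Read handler for the debugfs "page_owner" file: starting from the PFN
 * encoded in *ppos, find the next tracked, currently allocated page,
 * print its record and advance *ppos. Each read() returns one record.
 */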
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			continue;

		/*
		 * Access to page_owner->handle isn't synchronized, so we
		 * should be careful when reading it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}

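/*
 * Walk all pages of the zone and attribute every page that has no owner
 * yet to early boot allocations, using the early_handle sentinel.
 */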
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Owner already set, possibly from an overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			__set_page_owner_handle(page, page_ext, early_handle,
						0, 0);
			count++;
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)