^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * mm/balloon_compaction.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Common interface for making balloon pages movable by compaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/balloon_compaction.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
/*
 * balloon_page_enqueue_one - insert a single page into the balloon page list.
 * @b_dev_info: balloon device descriptor that will own the page.
 * @page: page to insert; the caller must already hold
 *        b_dev_info->pages_lock (both callers in this file take it first)
 *        and must be the sole holder of a reference to the page.
 */
static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
				     struct page *page)
{
	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point. If we are not, then
	 * memory corruption is possible and we should stop execution.
	 */
	BUG_ON(!trylock_page(page));
	balloon_page_insert(b_dev_info, page);
	unlock_page(page);
	/* Page is now ballooned; bump the inflate counter. */
	__count_vm_event(BALLOON_INFLATE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * @b_dev_info: balloon device descriptor where we will insert a new page to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * @pages: pages to enqueue - allocated using balloon_page_alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * Driver must call this function to properly enqueue balloon pages before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * definitively removing them from the guest system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * Return: number of pages that were enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) struct list_head *pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) struct page *page, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) size_t n_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) spin_lock_irqsave(&b_dev_info->pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) list_for_each_entry_safe(page, tmp, pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) list_del(&page->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) balloon_page_enqueue_one(b_dev_info, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) n_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return n_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * balloon_page_list_dequeue() - removes pages from balloon's page list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * @pages: pointer to the list of pages that would be returned to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * @n_req_pages: number of requested pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * Driver must call this function to properly de-allocate a previous enlisted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * balloon pages before definitively releasing it back to the guest system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * This function tries to remove @n_req_pages from the ballooned pages and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * return them to the caller in the @pages list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * Note that this function may fail to dequeue some pages even if the balloon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * isn't empty - since the page list can be temporarily empty due to compaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * of isolated pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * Return: number of pages that were added to the @pages list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages, size_t n_req_pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;	/* pages actually moved onto @pages */

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/* Stop as soon as the caller's quota is satisfied. */
		if (n_pages == n_req_pages)
			break;

		/*
		 * Block others from accessing the 'page' while we get around to
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (!trylock_page(page))
			continue;	/* someone else holds the page lock; skip it */

		if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
		    PageIsolated(page)) {
			/* raced with isolation */
			unlock_page(page);
			continue;
		}
		/* Detach from the balloon and hand the page to the caller. */
		balloon_page_delete(page);
		__count_vm_event(BALLOON_DEFLATE);
		list_add(&page->lru, pages);
		unlock_page(page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * balloon_page_alloc - allocates a new page for insertion into the balloon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * page list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * Driver must call this function to properly allocate a new balloon page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * Driver must call balloon_page_enqueue before definitively removing the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * from the guest system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * Return: struct page for the allocated page or NULL on allocation failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct page *balloon_page_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct page *page = alloc_page(balloon_mapping_gfp_mask() |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) __GFP_NOMEMALLOC | __GFP_NORETRY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) EXPORT_SYMBOL_GPL(balloon_page_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * balloon_page_enqueue - inserts a new page into the balloon page list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) * @b_dev_info: balloon device descriptor where we will insert a new page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) * @page: new page to enqueue - allocated using balloon_page_alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * Drivers must call this function to properly enqueue a new allocated balloon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * page before definitively removing the page from the guest system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * Drivers must not call balloon_page_enqueue on pages that have been pushed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * a list with balloon_page_push before removing them with balloon_page_pop. To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * enqueue a list of pages, use balloon_page_list_enqueue instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	/*
	 * Single-page convenience wrapper: take pages_lock (irq-safe, the
	 * lock is also taken from the migration paths) around the common
	 * enqueue helper.
	 */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_enqueue_one(b_dev_info, page);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * balloon_page_dequeue - removes a page from balloon's page list and returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * Driver must call this function to properly dequeue a previously enqueued page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * before definitively releasing it back to the guest system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * Caller must perform its own accounting to ensure that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * function is called only if some pages are actually enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * Note that this function may fail to dequeue some pages even if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * some enqueued pages - since the page list can be temporarily empty due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * the compaction of isolated pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * TODO: remove the caller accounting requirements, and allow caller to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * until all pages can be dequeued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * Return: struct page for the dequeued page, or NULL if no page was dequeued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	LIST_HEAD(pages);	/* receives at most one page */
	int n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);

	if (n_pages != 1) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		/* Transient failure (e.g. pages isolated for compaction). */
		return NULL;
	}
	/* Exactly one page was dequeued onto our local list; return it. */
	return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) #ifdef CONFIG_BALLOON_COMPACTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) struct balloon_dev_info *b_dev_info = balloon_page_device(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) spin_lock_irqsave(&b_dev_info->pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) list_del(&page->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) b_dev_info->isolated_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) void balloon_page_putback(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct balloon_dev_info *b_dev_info = balloon_page_device(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) spin_lock_irqsave(&b_dev_info->pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) list_add(&page->lru, &b_dev_info->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) b_dev_info->isolated_pages--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) /* move_to_new_page() counterpart for a ballooned page */
/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	/*
	 * We can not easily support the no copy case here so ignore it as it
	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	/* Migration core hands us both pages locked; sanity-check that. */
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	/* Delegate the actual copy/swap to the balloon driver's callback. */
	return balloon->migratepage(balloon, newpage, page, mode);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
/*
 * Address-space operations shared by all balloon drivers: wires the
 * compaction/migration hooks above into the page-migration core.
 */
const struct address_space_operations balloon_aops = {
	.migratepage = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) #endif /* CONFIG_BALLOON_COMPACTION */