// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
#include <linux/moduleparam.h>

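/*
 * Scrub (zero-fill) pages before handing them back to the hypervisor so
 * their previous contents cannot leak to other domains.  The default comes
 * from CONFIG_XEN_SCRUB_PAGES_DEFAULT and can be overridden at boot with
 * the xen_scrub_pages= core parameter.
 */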
bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

#ifdef CONFIG_XEN_HAVE_PVMMU
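/*
 * Set up the P2M entries and kernel linear mappings for @count pages that
 * have just been backed by the machine frames in @frames.  This is only
 * needed on PV guests (CONFIG_XEN_HAVE_PVMMU), where the kernel maintains
 * the physical-to-machine mapping itself.
 */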
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn;

		BUG_ON(!page);
		pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		set_phys_to_machine(pfn, frames[i]);

		/* Link back into the page tables if not highmem. */
		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					mfn_pte(frames[i], PAGE_KERNEL),
					0);
			BUG_ON(ret);
		}
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

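/*
 * Tear down the kernel linear mappings and invalidate the P2M entries for
 * @count pages before their backing machine frames are handed back to the
 * hypervisor, so that nothing in the guest still references them.
 */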
void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					__pte_ma(0), 0);
			BUG_ON(ret);
		}
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */

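/*
 * Ask the hypervisor to back the guest frames listed in @frames with fresh
 * memory via XENMEM_populate_physmap.  Returns whatever the hypercall
 * returns (normally the number of extents actually populated).
 */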
/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid = DOMID_SELF
	};

	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);

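/*
 * Return the guest frames listed in @frames to the hypervisor via
 * XENMEM_decrease_reservation, the inverse of xenmem_reservation_increase().
 * Returns whatever the hypercall returns (normally the number of extents
 * released).
 */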
/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid = DOMID_SELF
	};

	/* XENMEM_decrease_reservation requires a GFN */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);