// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
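/*
 * The buffer below fills exactly one page: the three bookkeeping words
 * (next_area_mfn, target_pfn, size) plus REMAP_SIZE mfn slots add up to
 * P2M_PER_PAGE entries of sizeof(unsigned long) each, hence the "- 3".
 */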
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO (10)

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

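	/* A bare "xen_512gb_limit" with no '=' means "enable the limit". */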
	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for zero size; that should happen rarely and
	 * would only write a new entry which is regarded as unused due to
	 * its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
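		/* Split: keep the head in place, re-add the tail below. */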
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor-supplied p2m list. Entries in extra mem are to be regarded
 * as invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/*
		 * If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

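/*
 * Hand a single page (extent_order 0) back to Xen. The hypercall returns
 * the number of extents actually released, so 1 indicates success.
 */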
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
		unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a linked list of buffer pages,
 * each holding up to REMAP_SIZE MFNs and the start target PFN for doing the
 * remap. This enables us to preserve the original mfn sequence while doing
 * the remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory'
 * and its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

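/*
 * Count the pages of the given non-RAM span that lie inside the initial
 * allocation, i.e. [start_pfn, min(end_pfn, nr_pages)); the running total
 * is carried in remap_pages across calls from xen_foreach_remap_area().
 */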
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

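			/* A RAM entry closes the combined non-RAM span at its start. */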
			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn remap to which pfn) is contained in the
 * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows the different chunks to be remapped in arbitrary order;
 * the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
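		/*
		 * Coalesce adjacent target ranges so xen_del_extra_mem()
		 * is called only once per contiguous pfn range.
		 */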
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

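/*
 * Note the inverted sense: the range counts as "reserved" unless it is
 * fully covered by a single E820 RAM entry.
 */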
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list which
 * conflict with the E820 map that is to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved (via memblock) for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
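		/* addr only reaches start + size if the final window was clean. */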
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

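	/*
	 * Copy in chunks no larger than the early fixmap window
	 * (NR_FIX_BTMAPS pages), mapping source and destination
	 * page-aligned via early_memremap().
	 */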
	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

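	/*
	 * The p2m list either lives in the initial kernel mapping
	 * (mfn_list holds a virtual address) or is referenced by pfn
	 * via first_p2m_pfn.
	 */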
	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
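	/*
	 * A hypervisor without the memory map hypercall (only possible
	 * for a domU, see the BUG_ON below) gets a synthesized map:
	 * one RAM entry covering the initial allocation plus slack.
	 */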
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to at most EXTRA_MEM_RATIO
	 * times the base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) extra_pages, max_pages - max_pfn);
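	/*
	 * Worked example with illustrative numbers: for a 2 GiB domU,
	 * max_pfn = 0x80000; assuming EXTRA_MEM_RATIO is 10 and
	 * max_pages = 0x100000, this is min3(0x500000, extra_pages,
	 * 0x80000), so extra memory can never exceed the room left
	 * below max_pages.
	 */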
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else {
				discard = true;
			}
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}
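	/*
	 * The walk above consumes each entry in chunks: a RAM entry that
	 * straddles mem_end is split, the part below mem_end is kept, up
	 * to extra_pages above it is turned into extra memory, and any
	 * remainder is discarded from the resulting E820 map.
	 */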

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
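		/*
		 * The boot header field is only 32 bits wide, so the low
		 * half of new_area goes into hdr.ramdisk_image and the
		 * high half into ext_ramdisk_image: e.g. new_area ==
		 * 0x123456000 stores 0x23456000 in the header field and
		 * 0x1 in the extension field.
		 */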
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %lu page(s)\n", xen_released_pages);

	return "Xen";
}

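/*
 * Ask Xen to invoke @func (entered on __KERNEL_CS) for callbacks of the
 * given @type; CALLBACKF_mask_events keeps event delivery masked while
 * the callback runs.
 */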
static int register_callback(unsigned int type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	int ret;
	unsigned int sysenter_feature;

	sysenter_feature = X86_FEATURE_SYSENTER32;

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/*
		 * Pretty fatal; 64-bit userspace has no other
		 * mechanism for syscalls.
		 */
	}

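	/*
	 * The 32-bit syscall callback is optional: if registering it
	 * fails, clearing X86_FEATURE_SYSCALL32 makes compat userspace
	 * fall back to the int $0x80 entry path instead.
	 */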
	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
}

static void __init xen_pvmmu_arch_setup(void)
{
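	/*
	 * Rough summary of the VM assists requested below: 4gb_segments
	 * makes Xen emulate segments covering the full 4 GiB that would
	 * otherwise overlap the hypervisor hole, writable_pagetables lets
	 * the guest write its page-table pages directly with Xen trapping
	 * and validating each write, and pae_extended_cr3 allows the PAE
	 * page-directory pointers to live above 4 GiB.
	 */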
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
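	/*
	 * I.e. copy min(MAX_GUEST_CMDLINE, COMMAND_LINE_SIZE) bytes, so
	 * the smaller of the source and destination buffers bounds the
	 * copy.
	 */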

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}