// SPDX-License-Identifier: GPL-2.0-only
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_phys;
};

static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);

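/*
 * Insert @new into the phys_to_mach rbtree, keyed by pfn.  The caller
 * must hold p2m_lock for writing.  Returns 0 on success, or -EINVAL
 * (after logging a warning) if an entry with the same starting pfn
 * already exists.
 */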
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

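/*
 * Look up the machine frame backing @pfn by walking the phys_to_mach
 * rbtree under the read side of p2m_lock.  Each entry covers the range
 * [pfn, pfn + nr_pages), so the mfn is computed from the offset into
 * the matching entry.  Returns INVALID_P2M_ENTRY if no mapping exists.
 */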
unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	/* Read the root inside the lock: writers may rebalance the tree. */
	n = phys_to_mach.rb_node;
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			unsigned long mfn = entry->mfn + (pfn - entry->pfn);
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return mfn;
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

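/*
 * Record the p2m translations for a batch of freshly mapped grant
 * references.  Slots whose map operation already failed are skipped.
 * If updating the p2m fails for a slot, the grant is unmapped again
 * right away and the slot's status is forced to GNTST_general_error
 * so the caller observes the failure.
 */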
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct gnttab_unmap_grant_ref unmap;
		int rc;

		if (map_ops[i].status)
			continue;
		if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
			continue;

		/*
		 * Signal an error for this slot. This in turn requires
		 * immediate unmapping.
		 */
		map_ops[i].status = GNTST_general_error;
		unmap.host_addr = map_ops[i].host_addr;
		unmap.handle = map_ops[i].handle;
		map_ops[i].handle = ~0;
		if (map_ops[i].flags & GNTMAP_device_map)
			unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
		else
			unmap.dev_bus_addr = 0;

		/*
		 * Pre-populate the status field, to be recognizable in
		 * the log message below.
		 */
		unmap.status = 1;

		rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					       &unmap, 1);
		if (rc || unmap.status != GNTST_okay)
			pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
				    rc, unmap.status);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

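/*
 * Drop the p2m translations for a batch of grant references being
 * unmapped, resetting each page to INVALID_P2M_ENTRY.
 */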
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    INVALID_P2M_ENTRY);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

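/*
 * Set or clear the p2m mapping for @nr_pages starting at @pfn.  When
 * @mfn is INVALID_P2M_ENTRY, the rbtree entry covering @pfn (if any)
 * is erased.  Otherwise a new entry is allocated with GFP_NOWAIT, so
 * this is safe to call from contexts that cannot sleep, and inserted
 * under the write lock.  Returns false on allocation failure or if
 * the range collides with an existing entry.
 */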
bool __set_phys_to_machine_multi(unsigned long pfn,
			unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n;

	if (mfn == INVALID_P2M_ENTRY) {
		write_lock_irqsave(&p2m_lock, irqflags);
		/* Read the root inside the lock: writers may rebalance the tree. */
		n = phys_to_mach.rb_node;
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

	p2m_entry = kzalloc(sizeof(*p2m_entry), GFP_NOWAIT);
	if (!p2m_entry)
		return false;

	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	rc = xen_add_phys_to_mach_entry(p2m_entry);
	if (rc < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		kfree(p2m_entry);
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);

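/* Single-page wrapper around __set_phys_to_machine_multi(). */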
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

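/* Initialize the rwlock protecting the p2m rbtree. */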
static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);