// SPDX-License-Identifier: GPL-2.0

#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

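/*
 * Return the machine frame number (MFN) backing an arbitrary kernel
 * virtual address, including addresses outside the linear mapping such
 * as vmalloc space (see arbitrary_virt_to_machine() below).
 */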
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

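/*
 * Translate an arbitrary kernel virtual address into a machine address.
 * Addresses in the linear mapping use the fast virt_to_machine() p2m
 * lookup; anything else falls back to a page-table walk and reads the
 * MFN straight out of the PTE.
 */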
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the address is in the linear mapped range, we can use the
	 * (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

/*
 * Returns: 0 success.  The pages array is only relevant for
 * auto-translated guests; for PV guests there is nothing to unmap here,
 * so a non-NULL pages argument is rejected with -EINVAL.
 */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_unmap_gfn_range(vma, nr, pages);

	if (!pages)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);