/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INCLUDE_XEN_OPS_H
#define INCLUDE_XEN_OPS_H

#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);

DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
static inline uint32_t xen_vcpu_nr(int cpu)
{
	return per_cpu(xen_vcpu_id, cpu);
}

#define XEN_VCPU_ID_INVALID U32_MAX

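/*
 * Usage sketch (illustrative): per-vCPU hypercalls take the Xen vCPU id
 * rather than the Linux CPU number, and a CPU with no backing Xen vCPU
 * reports XEN_VCPU_ID_INVALID (cpu is the caller's Linux CPU number):
 *
 *	uint32_t vcpu = xen_vcpu_nr(cpu);
 *
 *	if (vcpu == XEN_VCPU_ID_INVALID)
 *		return;
 *	HYPERVISOR_vcpu_op(VCPUOP_down, vcpu, NULL);
 */
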
void xen_arch_pre_suspend(void);
void xen_arch_post_suspend(int suspend_cancelled);

void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_arch_suspend(void);

void xen_reboot(int reason);

void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);

bool xen_vcpu_stolen(int vcpu);
void xen_setup_runstate_info(int cpu);
void xen_time_setup_guest(void);
void xen_manage_runstate_time(int action);
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
u64 xen_steal_clock(int cpu);

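/*
 * Usage sketch (illustrative): stolen time can be derived from a runstate
 * snapshot of the current vCPU, in the same spirit as xen_steal_clock():
 *
 *	struct vcpu_runstate_info state;
 *	u64 stolen;
 *
 *	xen_get_runstate_snapshot(&state);
 *	stolen = state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
 */
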
int xen_setup_shutdown_event(void);

extern unsigned long *xen_contiguous_bitmap;

#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
					       unsigned int order,
					       unsigned int address_bits,
					       dma_addr_t *dma_handle)
{
	return 0;
}

static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
						 unsigned int order) { }
#endif
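
/*
 * Usage sketch (illustrative): a machine-contiguous DMA buffer below a given
 * address limit can be set up and torn down roughly like this (buf, order
 * and address_bits are the caller's):
 *
 *	dma_addr_t dma_handle;
 *	int rc;
 *
 *	rc = xen_create_contiguous_region(virt_to_phys(buf), order,
 *					  address_bits, &dma_handle);
 *	if (rc)
 *		return rc;
 *	...
 *	xen_destroy_contiguous_region(virt_to_phys(buf), order);
 */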

struct vm_area_struct;

#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
		  unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
				xen_pfn_t *pfn, int nr, int *err_ptr,
				pgprot_t prot, unsigned int domid,
				bool no_translate, struct page **pages)
{
	BUG();
	return 0;
}
#endif

#ifdef CONFIG_XEN_AUTO_XLATE
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned int domid,
			      struct page **pages);
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages);
#else
/*
 * These two functions are called from arch/x86/xen/mmu.c, so stubs are
 * needed for configurations that do not set CONFIG_XEN_AUTO_XLATE.
 */
static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
					    unsigned long addr,
					    xen_pfn_t *gfn, int nr,
					    int *err_ptr, pgprot_t prot,
					    unsigned int domid,
					    struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
					    int nr, struct page **pages)
{
	return -EOPNOTSUPP;
}
#endif

int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
			unsigned long len);

/*
 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @gfn: Array of GFNs to map
 * @nr: Number of entries in the GFN array
 * @err_ptr: Returns per-GFN error status.
 * @prot: page protection mask
 * @domid: Domain owning the pages
 * @pages: Array of pages if this domain has an auto-translated physmap
 *
 * @gfn and @err_ptr may point to the same buffer; the GFNs will be
 * overwritten by the error codes after they are mapped.
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t *gfn, int nr,
					     int *err_ptr, pgprot_t prot,
					     unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/*
	 * We BUG_ON() because it is a programmer error to pass a NULL
	 * err_ptr: without it, the eventual symptom, the wrong memory
	 * having been mapped in, is very hard to trace back to its cause.
	 */
	BUG_ON(err_ptr == NULL);
	return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
			     false, pages);
}
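
/*
 * Usage sketch (illustrative, in the spirit of the privcmd driver): map a
 * batch of foreign GFNs into a userspace VMA, reusing the GFN buffer for
 * the per-frame error codes as described above (vma, gfns, nr, domid and
 * pages are the caller's):
 *
 *	int mapped;
 *
 *	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
 *					    (int *)gfns, vma->vm_page_prot,
 *					    domid, pages);
 *	if (mapped < 0)
 *		return mapped;
 *	// 'mapped' frames were installed; gfns[] now holds per-frame status
 */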

/*
 * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @mfn: Array of MFNs to map
 * @nr: Number of entries in the MFN array
 * @err_ptr: Returns per-MFN error status.
 * @prot: page protection mask
 * @domid: Domain owning the pages
 * @pages: Array of pages if this domain has an auto-translated physmap
 *
 * @mfn and @err_ptr may point to the same buffer; the MFNs will be
 * overwritten by the error codes after they are mapped.
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
					     unsigned long addr, xen_pfn_t *mfn,
					     int nr, int *err_ptr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			     true, pages);
}
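
/*
 * Usage sketch (illustrative): same calling convention as the GFN variant
 * above, except that frames are addressed by MFN, the mapping is not
 * translated, and only PV callers can use it (mfns, nr, err, domid and
 * pages are the caller's):
 *
 *	mapped = xen_remap_domain_mfn_array(vma, vma->vm_start, mfns, nr,
 *					    err, vma->vm_page_prot, domid,
 *					    pages);
 */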

/*
 * xen_remap_domain_gfn_range() - map a range of foreign frames
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @gfn: First GFN to map.
 * @nr: Number of frames to map
 * @prot: page protection mask
 * @domid: Domain owning the pages
 * @pages: Array of pages if this domain has an auto-translated physmap
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t gfn, int nr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
			     pages);
}

int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages);
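
/*
 * Usage sketch (illustrative): map a contiguous range of foreign frames and
 * tear the mapping down again when the VMA is released (first_gfn, nr,
 * domid and pages are the caller's):
 *
 *	rc = xen_remap_domain_gfn_range(vma, vma->vm_start, first_gfn, nr,
 *					vma->vm_page_prot, domid, pages);
 *	...
 *	rc = xen_unmap_domain_gfn_range(vma, nr, pages);
 */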

int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
				  unsigned long nr_grant_frames);

bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);

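/*
 * Usage sketch (illustrative): callers can gate functionality on the
 * hypervisor version, e.g. require at least Xen 4.6:
 *
 *	if (!xen_running_on_version_or_later(4, 6))
 *		return -ENODEV;
 */
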
void xen_efi_runtime_setup(void);

#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)

DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}

#else

static inline void xen_preemptible_hcall_begin(void) { }
static inline void xen_preemptible_hcall_end(void) { }

#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
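
/*
 * Usage sketch (illustrative, modelled on the privcmd hypercall path): a
 * potentially long-running hypercall issued from process context is
 * bracketed so that the event-channel upcall can reschedule the vCPU
 * (hcall is a hypothetical request structure):
 *
 *	xen_preemptible_hcall_begin();
 *	ret = privcmd_call(hcall->op, ...);
 *	xen_preemptible_hcall_end();
 */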

#endif /* INCLUDE_XEN_OPS_H */