Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2015, 2016 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #ifndef __KVM_ARM_VGIC_MMIO_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #define __KVM_ARM_VGIC_MMIO_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) struct vgic_register_region {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 	unsigned int reg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 	unsigned int bits_per_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 	unsigned int access_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 		unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 				      unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 		unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 					  gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 		void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 			      unsigned int len, unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 		void (*its_write)(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 				  gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 				  unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 				      unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 		int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 				     unsigned int len, unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 		int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 					 gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 					 unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) extern struct kvm_io_device_ops kvm_io_gic_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #define VGIC_ACCESS_8bit	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #define VGIC_ACCESS_32bit	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #define VGIC_ACCESS_64bit	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  * Generate a mask that covers the number of bytes required to address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  * up to 1024 interrupts, each represented by <bits> bits. This assumes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  * that <bits> is a power of two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  * (addr & mask) gives us the _byte_ offset for the INT ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  * We multiply this by 8 the get the _bit_ offset, then divide this by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  * the number of bits to learn the actual INT ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  * But instead of a division (which requires a "long long div" implementation),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * we shift by the binary logarithm of <bits>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * This assumes that <bits> is a power of two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 					8 >> ilog2(bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * Some VGIC registers store per-IRQ information, with a different number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  * of bits per IRQ. For those registers this macro is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  * The _WITH_LENGTH version instantiates registers with a fixed length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * and is mutually exclusive with the _PER_IRQ version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) #define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	{								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 		.reg_offset = off,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		.bits_per_irq = bpi,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		.len = bpi * 1024 / 8,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		.access_flags = acc,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		.read = rd,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		.write = wr,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 		.uaccess_read = ur,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		.uaccess_write = uw,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) #define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	{								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 		.reg_offset = off,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		.bits_per_irq = 0,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		.len = length,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		.access_flags = acc,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 		.read = rd,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		.write = wr,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) #define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	{								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 		.reg_offset = off,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 		.bits_per_irq = 0,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 		.len = length,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 		.access_flags = acc,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		.read = rd,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 		.write = wr,						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 		.uaccess_read = urd,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 		.uaccess_write = uwr,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 				unsigned long data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) unsigned long extract_bytes(u64 data, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 			    unsigned int num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		     unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 				 gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 				 gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 			unsigned int len, unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 			       unsigned int len, unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 				   unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 			   unsigned int len, unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 				    gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 			     gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 			     unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 			     gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 			     unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 			       gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 			       unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 			       gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 			       unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 				     gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 			      gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 			      unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 			      gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 			      unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 				gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 				unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 				gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 				unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 				    gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 				    gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 			     gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 			     unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 			     gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 			     unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 				    gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 				    unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 				    gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 				    unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 				      gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			      gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 			      unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 				    gpa_t addr, unsigned int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 			    gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 			    unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		 bool is_write, int offset, u32 *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 				    const u64 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) u64 vgic_sanitise_outer_cacheability(u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) u64 vgic_sanitise_inner_cacheability(u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) u64 vgic_sanitise_shareability(u64 reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 			u64 (*sanitise_fn)(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) /* Find the proper register handler entry given a certain address offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) const struct vgic_register_region *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) vgic_find_mmio_region(const struct vgic_register_region *regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 		      int nr_regions, unsigned int offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) #endif