Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5, 5B, and 5 Plus boards

/*
 * OpenPIC emulation
 *
 * Copyright (c) 2004 Jocelyn Mayer
 *               2011 Alexander Graf
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <asm/mpic.h>
#include <asm/kvm_para.h>
#include <asm/kvm_ppc.h>
#include <kvm/iodev.h>

#define MAX_CPU     32
#define MAX_SRC     256
#define MAX_TMR     4
#define MAX_IPI     4
#define MAX_MSI     8
#define MAX_IRQ     (MAX_SRC + MAX_IPI + MAX_TMR)
#define VID         0x03	/* MPIC version ID */

/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
#define OPENPIC_FLAG_ILR          (2 << 0)

/* OpenPIC address map */
#define OPENPIC_REG_SIZE             0x40000
#define OPENPIC_GLB_REG_START        0x0
#define OPENPIC_GLB_REG_SIZE         0x10F0
#define OPENPIC_TMR_REG_START        0x10F0
#define OPENPIC_TMR_REG_SIZE         0x220
#define OPENPIC_MSI_REG_START        0x1600
#define OPENPIC_MSI_REG_SIZE         0x200
#define OPENPIC_SUMMARY_REG_START    0x3800
#define OPENPIC_SUMMARY_REG_SIZE     0x800
#define OPENPIC_SRC_REG_START        0x10000
#define OPENPIC_SRC_REG_SIZE         (MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START        0x20000
#define OPENPIC_CPU_REG_SIZE         (0x100 + ((MAX_CPU - 1) * 0x1000))

struct fsl_mpic_info {
	int max_ext;
};

static struct fsl_mpic_info fsl_mpic_20 = {
	.max_ext = 12,
};

static struct fsl_mpic_info fsl_mpic_42 = {
	.max_ext = 12,
};

#define FRR_NIRQ_SHIFT    16
#define FRR_NCPU_SHIFT     8
#define FRR_VID_SHIFT      0

#define VID_REVISION_1_2   2
#define VID_REVISION_1_3   3

#define VIR_GENERIC      0x00000000	/* Generic Vendor ID */

#define GCR_RESET        0x80000000
#define GCR_MODE_PASS    0x00000000
#define GCR_MODE_MIXED   0x20000000
#define GCR_MODE_PROXY   0x60000000

#define TBCR_CI           0x80000000	/* count inhibit */
#define TCCR_TOG          0x80000000	/* toggles when decremented to zero */

#define IDR_EP_SHIFT      31
#define IDR_EP_MASK       (1 << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT     30
#define IDR_CI1_SHIFT     29
#define IDR_P1_SHIFT      1
#define IDR_P0_SHIFT      0

#define ILR_INTTGT_MASK   0x000000ff
#define ILR_INTTGT_INT    0x00
#define ILR_INTTGT_CINT   0x01	/* critical */
#define ILR_INTTGT_MCP    0x02	/* machine check */
#define NUM_OUTPUTS       3

#define MSIIR_OFFSET       0x140
#define MSIIR_SRS_SHIFT    29
#define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT    24
#define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT)

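/*
 * Find out which vCPU (if any) is performing the current access, so that
 * the "current CPU" register window can be routed to the right
 * struct irq_dest.  Returns the vCPU's irq_cpu_id, or -1 when not called
 * from a vCPU context.
 */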
static int get_current_cpu(void)
{
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
	return vcpu ? vcpu->arch.irq_cpu_id : -1;
#else
	/* XXX */
	return -1;
#endif
}

static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx);
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx);
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val);

enum irq_type {
	IRQ_TYPE_NORMAL = 0,
	IRQ_TYPE_FSLINT,	/* FSL internal interrupt -- level only */
	IRQ_TYPE_FSLSPECIAL,	/* FSL timer/IPI interrupt, edge, no polarity */
};

struct irq_queue {
	/* Round up to the nearest 64 IRQs so that the queue length
	 * won't change when moving between 32 and 64 bit hosts.
	 */
	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
	int next;
	int priority;
};

struct irq_source {
	uint32_t ivpr;		/* IRQ vector/priority register */
	uint32_t idr;		/* IRQ destination register */
	uint32_t destmask;	/* bitmap of CPU destinations */
	int last_cpu;
	int output;		/* IRQ level, e.g. ILR_INTTGT_INT */
	int pending;		/* TRUE if IRQ is pending */
	enum irq_type type;
	bool level:1;		/* level-triggered */
	bool nomask:1;	/* critical interrupts ignore mask on some FSL MPICs */
};

#define IVPR_MASK_SHIFT       31
#define IVPR_MASK_MASK        (1 << IVPR_MASK_SHIFT)
#define IVPR_ACTIVITY_SHIFT   30
#define IVPR_ACTIVITY_MASK    (1 << IVPR_ACTIVITY_SHIFT)
#define IVPR_MODE_SHIFT       29
#define IVPR_MODE_MASK        (1 << IVPR_MODE_SHIFT)
#define IVPR_POLARITY_SHIFT   23
#define IVPR_POLARITY_MASK    (1 << IVPR_POLARITY_SHIFT)
#define IVPR_SENSE_SHIFT      22
#define IVPR_SENSE_MASK       (1 << IVPR_SENSE_SHIFT)

#define IVPR_PRIORITY_MASK     (0xF << 16)
#define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
#define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)

/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
#define IDR_EP      0x80000000	/* external pin */
#define IDR_CI      0x40000000	/* critical interrupt */

struct irq_dest {
	struct kvm_vcpu *vcpu;

	int32_t ctpr;		/* CPU current task priority */
	struct irq_queue raised;
	struct irq_queue servicing;

	/* Count of IRQ sources asserting on non-INT outputs */
	uint32_t outputs_active[NUM_OUTPUTS];
};

#define MAX_MMIO_REGIONS 10

struct openpic {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct kvm_io_device mmio;
	const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS];
	int num_mmio_regions;

	gpa_t reg_base;
	spinlock_t lock;

	/* Behavior control */
	struct fsl_mpic_info *fsl;
	uint32_t model;
	uint32_t flags;
	uint32_t nb_irqs;
	uint32_t vid;
	uint32_t vir;		/* Vendor identification register */
	uint32_t vector_mask;
	uint32_t tfrr_reset;
	uint32_t ivpr_reset;
	uint32_t idr_reset;
	uint32_t brr1;
	uint32_t mpic_mode_mask;

	/* Global registers */
	uint32_t frr;		/* Feature reporting register */
	uint32_t gcr;		/* Global configuration register  */
	uint32_t pir;		/* Processor initialization register */
	uint32_t spve;		/* Spurious vector register */
	uint32_t tfrr;		/* Timer frequency reporting register */
	/* Source registers */
	struct irq_source src[MAX_IRQ];
	/* Local registers per output pin */
	struct irq_dest dst[MAX_CPU];
	uint32_t nb_cpus;
	/* Timer registers */
	struct {
		uint32_t tccr;	/* Global timer current count register */
		uint32_t tbcr;	/* Global timer base count register */
	} timers[MAX_TMR];
	/* Shared MSI registers */
	struct {
		uint32_t msir;	/* Shared Message Signaled Interrupt Register */
	} msi[MAX_MSI];
	uint32_t max_irq;
	uint32_t irq_ipi0;
	uint32_t irq_tim0;
	uint32_t irq_msi;
};


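/*
 * Assert an output pin towards a destination vCPU.  Only the normal INT
 * output is actually delivered (as a KVM external interrupt); the critical
 * and machine-check outputs are not wired up yet.
 */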
static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	struct kvm_interrupt irq = {
		.irq = KVM_INTERRUPT_SET_LEVEL,
	};

	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
}

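/*
 * Deassert an output pin: dequeue the external interrupt from the
 * destination vCPU.  As above, only the INT output is handled.
 */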
static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvmppc_core_dequeue_external(dst->vcpu);
}

static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
{
	set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
{
	clear_bit(n_IRQ, q->queue);
}

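/*
 * Scan a queue's pending bitmap and cache the highest-priority IRQ in
 * q->next and its priority in q->priority (-1/-1 when the queue is empty).
 */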
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
	int irq = -1;
	int next = -1;
	int priority = -1;

	for (;;) {
		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
		if (irq == opp->max_irq)
			break;

		pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
			irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

		if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
			next = irq;
			priority = IVPR_PRIORITY(opp->src[irq].ivpr);
		}
	}

	q->next = next;
	q->priority = priority;
}

static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
{
	/* XXX: optimize */
	IRQ_check(opp, q);

	return q->next;
}

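/*
 * Propagate a state change of source n_IRQ to destination CPU n_CPU.
 * Non-INT outputs (critical, machine check) are simply reference-counted
 * and raised/lowered on 0<->1 transitions.  For the INT output, the raised
 * queue is updated and the line is raised or lowered depending on the
 * destination's current task priority (ctpr) and on any IRQ currently
 * being serviced.
 */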
static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
			   bool active, bool was_active)
{
	struct irq_dest *dst;
	struct irq_source *src;
	int priority;

	dst = &opp->dst[n_CPU];
	src = &opp->src[n_IRQ];

	pr_debug("%s: IRQ %d active %d was %d\n",
		__func__, n_IRQ, active, was_active);

	if (src->output != ILR_INTTGT_INT) {
		pr_debug("%s: output %d irq %d active %d was %d count %d\n",
			__func__, src->output, n_IRQ, active, was_active,
			dst->outputs_active[src->output]);

		/* On Freescale MPIC, critical interrupts ignore priority,
		 * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
		 * masking.
		 */
		if (active) {
			if (!was_active &&
			    dst->outputs_active[src->output]++ == 0) {
				pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_raise(opp, dst, src->output);
			}
		} else {
			if (was_active &&
			    --dst->outputs_active[src->output] == 0) {
				pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_lower(opp, dst, src->output);
			}
		}

		return;
	}

	priority = IVPR_PRIORITY(src->ivpr);

	/* Even if the interrupt doesn't have enough priority,
	 * it is still raised, in case ctpr is lowered later.
	 */
	if (active)
		IRQ_setbit(&dst->raised, n_IRQ);
	else
		IRQ_resetbit(&dst->raised, n_IRQ);

	IRQ_check(opp, &dst->raised);

	if (active && priority <= dst->ctpr) {
		pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
			__func__, n_IRQ, priority, dst->ctpr, n_CPU);
		active = 0;
	}

	if (active) {
		if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
		    priority <= dst->servicing.priority) {
			pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
				__func__, n_IRQ, dst->servicing.next, n_CPU);
		} else {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
				__func__, n_CPU, n_IRQ, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}
	} else {
		IRQ_get_next(opp, &dst->servicing);
		if (dst->raised.priority > dst->ctpr &&
		    dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->raised.next,
				dst->raised.priority, dst->ctpr,
				dst->servicing.priority, n_CPU);
			/* IRQ line stays asserted */
		} else {
			pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->ctpr,
				dst->servicing.priority, n_CPU);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		}
	}
}

/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(struct openpic *opp, int n_IRQ)
{
	struct irq_source *src;
	bool active, was_active;
	int i;

	src = &opp->src[n_IRQ];
	active = src->pending;

	if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
		/* Interrupt source is disabled */
		pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
		active = false;
	}

	was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

	/*
	 * We don't have a similar check for already-active because
	 * ctpr may have changed and we need to withdraw the interrupt.
	 */
	if (!active && !was_active) {
		pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
		return;
	}

	if (active)
		src->ivpr |= IVPR_ACTIVITY_MASK;
	else
		src->ivpr &= ~IVPR_ACTIVITY_MASK;

	if (src->destmask == 0) {
		/* No target */
		pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
		return;
	}

	if (src->destmask == (1 << src->last_cpu)) {
		/* Only one CPU is allowed to receive this IRQ */
		IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
	} else if (!(src->ivpr & IVPR_MODE_MASK)) {
		/* Directed delivery mode */
		for (i = 0; i < opp->nb_cpus; i++) {
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
			}
		}
	} else {
		/* Distributed delivery mode */
		for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
			if (i == opp->nb_cpus)
				i = 0;

			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
				src->last_cpu = i;
				break;
			}
		}
	}
}

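/*
 * Entry point for changing the level of an interrupt input line.
 * Level-sensitive sources track the line state directly; edge-sensitive
 * sources latch pending on a rising edge (and, when routed to a non-INT
 * output, are immediately cleared again as described in the comment below).
 */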
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
	struct openpic *opp = opaque;
	struct irq_source *src;

	if (n_IRQ >= MAX_IRQ) {
		WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
		return;
	}

	src = &opp->src[n_IRQ];
	pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
		n_IRQ, level, src->ivpr);
	if (src->level) {
		/* level-sensitive irq */
		src->pending = level;
		openpic_update_irq(opp, n_IRQ);
	} else {
		/* edge-sensitive irq */
		if (level) {
			src->pending = 1;
			openpic_update_irq(opp, n_IRQ);
		}

		if (src->output != ILR_INTTGT_INT) {
			/* Edge-triggered interrupts shouldn't be used
			 * with non-INT delivery, but just in case,
			 * try to make it do something sane rather than
			 * cause an interrupt storm.  This is close to
			 * what you'd probably see happen in real hardware.
			 */
			src->pending = 0;
			openpic_update_irq(opp, n_IRQ);
		}
	}
}

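/*
 * Bring the controller back to its power-on state: reset the global
 * registers, every source's IVPR/IDR, the per-CPU raised/servicing queues
 * and the global timers, then clear the GCR reset bit.
 */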
static void openpic_reset(struct openpic *opp)
{
	int i;

	opp->gcr = GCR_RESET;
	/* Initialise controller registers */
	opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
	    (opp->vid << FRR_VID_SHIFT);

	opp->pir = 0;
	opp->spve = -1 & opp->vector_mask;
	opp->tfrr = opp->tfrr_reset;
	/* Initialise IRQ sources */
	for (i = 0; i < opp->max_irq; i++) {
		opp->src[i].ivpr = opp->ivpr_reset;

		switch (opp->src[i].type) {
		case IRQ_TYPE_NORMAL:
			opp->src[i].level =
			    !!(opp->ivpr_reset & IVPR_SENSE_MASK);
			break;

		case IRQ_TYPE_FSLINT:
			opp->src[i].ivpr |= IVPR_POLARITY_MASK;
			break;

		case IRQ_TYPE_FSLSPECIAL:
			break;
		}

		write_IRQreg_idr(opp, i, opp->idr_reset);
	}
	/* Initialise IRQ destinations */
	for (i = 0; i < MAX_CPU; i++) {
		opp->dst[i].ctpr = 15;
		memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
		opp->dst[i].raised.next = -1;
		memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
		opp->dst[i].servicing.next = -1;
	}
	/* Initialise timers */
	for (i = 0; i < MAX_TMR; i++) {
		opp->timers[i].tccr = 0;
		opp->timers[i].tbcr = TBCR_CI;
	}
	/* Go out of RESET state */
	opp->gcr = 0;
}

static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
{
	if (opp->flags & OPENPIC_FLAG_ILR)
		return opp->src[n_IRQ].output;

	return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].ivpr;
}

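/*
 * Write a source's destination register.  The value is masked down to the
 * bits that are valid for the configured number of CPUs (plus the critical
 * and external-pin bits when OPENPIC_FLAG_IDR_CRIT is set), and the cached
 * output type, nomask flag and destination CPU mask are recomputed.
 */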
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	struct irq_source *src = &opp->src[n_IRQ];
	uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
	uint32_t crit_mask = 0;
	uint32_t mask = normal_mask;
	int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
	int i;

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		crit_mask = mask << crit_shift;
		mask |= crit_mask | IDR_EP;
	}

	src->idr = val & mask;
	pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		if (src->idr & crit_mask) {
			if (src->idr & normal_mask) {
				pr_debug("%s: IRQ configured for multiple output types, using critical\n",
					__func__);
			}

			src->output = ILR_INTTGT_CINT;
			src->nomask = true;
			src->destmask = 0;

			for (i = 0; i < opp->nb_cpus; i++) {
				int n_ci = IDR_CI0_SHIFT - i;

				if (src->idr & (1UL << n_ci))
					src->destmask |= 1UL << i;
			}
		} else {
			src->output = ILR_INTTGT_INT;
			src->nomask = false;
			src->destmask = src->idr & normal_mask;
		}
	} else {
		src->destmask = src->idr;
	}
}

static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	if (opp->flags & OPENPIC_FLAG_ILR) {
		struct irq_source *src = &opp->src[n_IRQ];

		src->output = val & ILR_INTTGT_MASK;
		pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
			src->output);

		/* TODO: on MPIC v4.0 only, set nomask for non-INT */
	}
}

static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
				     uint32_t val)
{
	uint32_t mask;

	/* NOTE when implementing newer FSL MPIC models: starting with v4.0,
	 * the polarity bit is read-only on internal interrupts.
	 */
	mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
	    IVPR_POLARITY_MASK | opp->vector_mask;

	/* ACTIVITY bit is read-only */
	opp->src[n_IRQ].ivpr =
	    (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

	/* For FSL internal interrupts, the sense bit is reserved and zero,
	 * and the interrupt is always level-triggered.  Timers and IPIs
	 * have no sense or polarity bits, and are edge-triggered.
	 */
	switch (opp->src[n_IRQ].type) {
	case IRQ_TYPE_NORMAL:
		opp->src[n_IRQ].level =
		    !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
		break;

	case IRQ_TYPE_FSLINT:
		opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
		break;

	case IRQ_TYPE_FSLSPECIAL:
		opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
		break;
	}

	openpic_update_irq(opp, n_IRQ);
	pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
		opp->src[n_IRQ].ivpr);
}

static void openpic_gcr_write(struct openpic *opp, uint64_t val)
{
	if (val & GCR_RESET) {
		openpic_reset(opp);
		return;
	}

	opp->gcr &= ~opp->mpic_mode_mask;
	opp->gcr |= val & opp->mpic_mode_mask;
}

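/*
 * MMIO write handler for the global register block.  The per-CPU registers
 * mirrored at the start of the block are forwarded to the current CPU's
 * register window; writes to PIR return -ENXIO so that the CPU-reset
 * functionality is left to userspace.
 */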
static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int err = 0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case 0x00:	/* Block Revision Register1 (BRR1) is read-only */
		break;
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_write_internal(opp, addr, val,
						 get_current_cpu());
		break;
	case 0x1000:		/* FRR */
		break;
	case 0x1020:		/* GCR */
		openpic_gcr_write(opp, val);
		break;
	case 0x1080:		/* VIR */
		break;
	case 0x1090:		/* PIR */
		/*
		 * This register is used to reset a CPU core --
		 * let userspace handle it.
		 */
		err = -ENXIO;
		break;
	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;
		idx = (addr - 0x10A0) >> 4;
		write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
		break;
	}
	case 0x10E0:		/* SPVE */
		opp->spve = val & opp->vector_mask;
		break;
	default:
		break;
	}

	return err;
}

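/*
 * MMIO read handler for the global register block.  Unknown or reserved
 * offsets read back as 0xFFFFFFFF.
 */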
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	u32 retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	pr_debug("%s: addr %#llx\n", __func__, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	retval = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	if (addr & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	case 0x1000:		/* FRR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		retval = opp->frr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	case 0x1020:		/* GCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		retval = opp->gcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	case 0x1080:		/* VIR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		retval = opp->vir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	case 0x1090:		/* PIR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		retval = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	case 0x00:		/* Block Revision Register1 (BRR1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		retval = opp->brr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	case 0x40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	case 0x50:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	case 0x60:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	case 0x70:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	case 0x80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	case 0x90:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	case 0xA0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	case 0xB0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		err = openpic_cpu_read_internal(opp, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			&retval, get_current_cpu());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	case 0x10A0:		/* IPI_IVPR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	case 0x10B0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	case 0x10C0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	case 0x10D0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			idx = (addr - 0x10A0) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	case 0x10E0:		/* SPVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		retval = opp->spve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	pr_debug("%s: => 0x%08x\n", __func__, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	*ptr = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
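/*
 * Timer register block writes: TFRR plus the per-timer TBCR, TVPR and
 * TDR registers.  TCCR is read-only, so writes to it are ignored.
 */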
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	addr += 0x10f0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if (addr & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (addr == 0x10f0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		/* TFRR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		opp->tfrr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	idx = (addr >> 6) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	addr = addr & 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	switch (addr & 0x30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	case 0x00:		/* TCCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	case 0x10:		/* TBCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		    (val & TBCR_CI) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		    (opp->timers[idx].tbcr & TBCR_CI) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			opp->timers[idx].tccr &= ~TCCR_TOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		opp->timers[idx].tbcr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	case 0x20:		/* TVPR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	case 0x30:		/* TDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
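/*
 * Timer register block reads: TFRR plus the per-timer current-count,
 * base-count, vector/priority and destination registers.
 */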
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	uint32_t retval = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	pr_debug("%s: addr %#llx\n", __func__, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	if (addr & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	idx = (addr >> 6) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (addr == 0x0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		/* TFRR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		retval = opp->tfrr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	switch (addr & 0x30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	case 0x00:		/* TCCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		retval = opp->timers[idx].tccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	case 0x10:		/* TBCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		retval = opp->timers[idx].tbcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	case 0x20:		/* TIPV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	case 0x30:		/* TIDE (TIDR) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	pr_debug("%s: => 0x%08x\n", __func__, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	*ptr = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
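/*
 * Interrupt-source registers: each source has a 32-byte stride with
 * IVPR at offset 0x00, IDR at 0x10 and ILR at 0x18.
 */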
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	addr = addr & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	idx = addr >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	switch (addr & 0x1f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	case 0x00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		write_IRQreg_ivpr(opp, idx, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		write_IRQreg_idr(opp, idx, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	case 0x18:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		write_IRQreg_ilr(opp, idx, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	uint32_t retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	pr_debug("%s: addr %#llx\n", __func__, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	retval = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	addr = addr & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	idx = addr >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	switch (addr & 0x1f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	case 0x00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		retval = read_IRQreg_ivpr(opp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		retval = read_IRQreg_idr(opp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	case 0x18:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		retval = read_IRQreg_ilr(opp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	pr_debug("%s: => 0x%08x\n", __func__, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	*ptr = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
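/*
 * MSI block writes: a write to MSIIR latches the message bit in the
 * selected MSIR and asserts the corresponding shared MSI interrupt.
 */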
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	int idx = opp->irq_msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	int srs, ibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (addr & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	case MSIIR_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		srs = val >> MSIIR_SRS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		idx += srs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		opp->msi[srs].msir |= 1 << ibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		openpic_set_irq(opp, idx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		/* most registers are read-only, thus ignored */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
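/*
 * MSI block reads: the MSIRs are clear-on-read and reading one also
 * deasserts its interrupt; MSISR summarizes which MSIRs are pending.
 */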
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	uint32_t r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	int i, srs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	pr_debug("%s: addr %#llx\n", __func__, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	if (addr & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	srs = addr >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	case 0x00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	case 0x20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	case 0x30:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	case 0x40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	case 0x50:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	case 0x60:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	case 0x70:		/* MSIRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		r = opp->msi[srs].msir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		/* Clear on read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		opp->msi[srs].msir = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		openpic_set_irq(opp, opp->irq_msi + srs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	case 0x120:		/* MSISR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		for (i = 0; i < MAX_MSI; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			r |= (opp->msi[i].msir ? 1 : 0) << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	pr_debug("%s: => 0x%08x\n", __func__, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	*ptr = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	uint32_t r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	pr_debug("%s: addr %#llx\n", __func__, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	/* TODO: EISR/EIMR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	*ptr = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	/* TODO: EISR/EIMR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
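/*
 * Per-CPU register writes: IPI dispatch (IPIDR), current task priority
 * (CTPR) and end-of-interrupt (EOI) handling.  WHOAMI and IACK are
 * read-only and ignored here.
 */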
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				      u32 val, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	struct irq_source *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	struct irq_dest *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	int s_IRQ, n_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (addr & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	dst = &opp->dst[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	addr &= 0xFF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	case 0x40:		/* IPIDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	case 0x50:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	case 0x60:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	case 0x70:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		idx = (addr - 0x40) >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		/* IDE is used as a mask of the CPUs the IPI still has to be delivered to. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		opp->src[opp->irq_ipi0 + idx].destmask |= val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	case 0x80:		/* CTPR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		dst->ctpr = val & 0x0000000F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			__func__, idx, dst->ctpr, dst->raised.priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			dst->servicing.priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		if (dst->raised.priority <= dst->ctpr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 				__func__, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		} else if (dst->raised.priority > dst->servicing.priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				__func__, idx, dst->raised.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	case 0x90:		/* WHOAMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		/* Read-only register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	case 0xA0:		/* IACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		/* Read-only register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	case 0xB0: {		/* EOI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		int notify_eoi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		pr_debug("EOI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		s_IRQ = IRQ_get_next(opp, &dst->servicing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		if (s_IRQ < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			pr_debug("%s: EOI with no interrupt in service\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		IRQ_resetbit(&dst->servicing, s_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		/* Notify listeners that the IRQ is over */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		notify_eoi = s_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		/* Set up next servicing IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		s_IRQ = IRQ_get_next(opp, &dst->servicing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		/* Check queued interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		n_IRQ = IRQ_get_next(opp, &dst->raised);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		src = &opp->src[n_IRQ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		if (n_IRQ != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		    (s_IRQ == -1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		     IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				idx, n_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
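		/*
		 * Drop the lock across kvm_notify_acked_irq(): ack notifiers
		 * may re-enter the MPIC (e.g. kvm_set_irq() for a resampling
		 * irqfd) and would otherwise deadlock on opp->lock.
		 */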
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		spin_unlock(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		spin_lock(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
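/*
 * The per-CPU register blocks are 4 KiB apart, so bits 12-16 of the
 * offset select which CPU's registers are being accessed.
 */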
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	return openpic_cpu_write_internal(opp, addr, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 					 (addr & 0x1f000) >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
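/*
 * Interrupt acknowledge: lower the INT output, move the highest-priority
 * raised interrupt into the servicing queue and return its vector, or
 * the spurious vector if nothing suitable is pending.
 */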
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			     int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	struct irq_source *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	int retval, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	pr_debug("Lower OpenPIC INT output\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	irq = IRQ_get_next(opp, &dst->raised);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	pr_debug("IACK: irq=%d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (irq == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		/* No more interrupts pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		return opp->spve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	src = &opp->src[irq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	    !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			__func__, irq, dst->ctpr, src->ivpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		openpic_update_irq(opp, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		retval = opp->spve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		/* IRQ enters the servicing state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		IRQ_setbit(&dst->servicing, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		retval = IVPR_VECTOR(opp, src->ivpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if (!src->level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		/* edge-sensitive IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		src->ivpr &= ~IVPR_ACTIVITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		src->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		IRQ_resetbit(&dst->raised, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		src->destmask &= ~(1 << cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		if (src->destmask && !src->level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			/* trigger on CPUs that didn't know about it yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			openpic_set_irq(opp, irq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			openpic_set_irq(opp, irq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			/* if all CPUs knew about it, set active bit again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			src->ivpr |= IVPR_ACTIVITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	struct openpic *opp = vcpu->arch.mpic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	int cpu = vcpu->arch.irq_cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	spin_lock_irqsave(&opp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	spin_unlock_irqrestore(&opp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
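/*
 * Per-CPU register reads: CTPR, WHOAMI, IACK (which performs the
 * acknowledge) and EOI (which reads as zero).
 */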
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 				     u32 *ptr, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct irq_dest *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	uint32_t retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	retval = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (addr & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	dst = &opp->dst[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	addr &= 0xFF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	case 0x80:		/* CTPR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		retval = dst->ctpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	case 0x90:		/* WHOAMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		retval = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	case 0xA0:		/* IACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		retval = openpic_iack(opp, dst, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	case 0xB0:		/* EOI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	pr_debug("%s: => 0x%08x\n", __func__, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	*ptr = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	struct openpic *opp = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	return openpic_cpu_read_internal(opp, addr, ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 					 (addr & 0x1f000) >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
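/*
 * Each MMIO sub-block of the emulated MPIC is described by a mem_reg:
 * its read/write handlers plus the block's offset and size within the
 * OpenPIC register space.
 */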
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct mem_reg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	int (*read)(void *opaque, gpa_t addr, u32 *ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	int (*write)(void *opaque, gpa_t addr, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	gpa_t start_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static const struct mem_reg openpic_gbl_mmio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	.write = openpic_gbl_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	.read = openpic_gbl_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	.start_addr = OPENPIC_GLB_REG_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	.size = OPENPIC_GLB_REG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static const struct mem_reg openpic_tmr_mmio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	.write = openpic_tmr_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	.read = openpic_tmr_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	.start_addr = OPENPIC_TMR_REG_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	.size = OPENPIC_TMR_REG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static const struct mem_reg openpic_cpu_mmio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	.write = openpic_cpu_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	.read = openpic_cpu_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	.start_addr = OPENPIC_CPU_REG_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	.size = OPENPIC_CPU_REG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static const struct mem_reg openpic_src_mmio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	.write = openpic_src_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	.read = openpic_src_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	.start_addr = OPENPIC_SRC_REG_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	.size = OPENPIC_SRC_REG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static const struct mem_reg openpic_msi_mmio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	.read = openpic_msi_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	.write = openpic_msi_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	.start_addr = OPENPIC_MSI_REG_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	.size = OPENPIC_MSI_REG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static const struct mem_reg openpic_summary_mmio = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	.read = openpic_summary_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	.write = openpic_summary_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	.start_addr = OPENPIC_SUMMARY_REG_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	.size = OPENPIC_SUMMARY_REG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		WARN(1, "kvm mpic: too many mmio regions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	opp->mmio_regions[opp->num_mmio_regions++] = mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
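/*
 * Common Freescale MPIC setup: register the MSI and summary blocks,
 * place the IPI and timer interrupts after the regular sources, and
 * mark external sources as edge-triggered and internal ones as level.
 */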
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static void fsl_common_init(struct openpic *opp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	int virq = MAX_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	add_mmio_region(opp, &openpic_msi_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	add_mmio_region(opp, &openpic_summary_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	opp->vid = VID_REVISION_1_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	opp->vir = VIR_GENERIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	opp->vector_mask = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	opp->tfrr_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	opp->ivpr_reset = IVPR_MASK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	opp->idr_reset = 1 << 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	opp->max_irq = MAX_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	opp->irq_ipi0 = virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	virq += MAX_IPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	opp->irq_tim0 = virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	virq += MAX_TMR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	BUG_ON(virq > MAX_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	opp->irq_msi = 224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	for (i = 0; i < opp->fsl->max_ext; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		opp->src[i].level = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	/* Internal interrupts, including message and MSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	for (i = 16; i < MAX_SRC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		opp->src[i].type = IRQ_TYPE_FSLINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		opp->src[i].level = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	/* timers and IPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	for (i = MAX_SRC; i < virq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		opp->src[i].level = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
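/*
 * Dispatch a register read to whichever registered MMIO sub-block
 * contains the offset (the write path below does the same); returns
 * -ENXIO if no block claims it.
 */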
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	for (i = 0; i < opp->num_mmio_regions; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		const struct mem_reg *mr = opp->mmio_regions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		return mr->read(opp, addr - mr->start_addr, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	for (i = 0; i < opp->num_mmio_regions; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		const struct mem_reg *mr = opp->mmio_regions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		return mr->write(opp, addr - mr->start_addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static int kvm_mpic_read(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			 struct kvm_io_device *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			 gpa_t addr, int len, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	struct openpic *opp = container_of(this, struct openpic, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		u8 bytes[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	} u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	if (addr & (len - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		pr_debug("%s: bad alignment %llx/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			 __func__, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	spin_lock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	spin_unlock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	 * Technically only 32-bit accesses are allowed, but be nice to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	 * people dumping registers a byte at a time -- it works in real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	 * hardware (reads only, not writes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (len == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		*(u32 *)ptr = u.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		pr_debug("%s: addr %llx ret %d len 4 val %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			 __func__, addr, ret, u.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	} else if (len == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		*(u8 *)ptr = u.bytes[addr & 3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		pr_debug("%s: addr %llx ret %d len 1 val %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			 __func__, addr, ret, u.bytes[addr & 3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		pr_debug("%s: bad length %d\n", __func__, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int kvm_mpic_write(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			  struct kvm_io_device *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			  gpa_t addr, int len, const void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	struct openpic *opp = container_of(this, struct openpic, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if (len != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		pr_debug("%s: bad length %d\n", __func__, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	if (addr & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	spin_lock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 				      *(const u32 *)ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	spin_unlock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	pr_debug("%s: addr %llx ret %d val %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		 __func__, addr, ret, *(const u32 *)ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static const struct kvm_io_device_ops mpic_mmio_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	.read = kvm_mpic_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	.write = kvm_mpic_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static void map_mmio(struct openpic *opp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 				opp->reg_base, OPENPIC_REG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 				&opp->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static void unmap_mmio(struct openpic *opp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
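/*
 * Relocate the MPIC register window.  The base must be 256 KiB aligned;
 * a base of 0 leaves the MPIC unmapped on the MMIO bus.
 */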
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	u64 base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	if (base & 0x3ffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			 __func__, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	if (base == opp->reg_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	mutex_lock(&opp->kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	unmap_mmio(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	opp->reg_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		 __func__, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (base == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	map_mmio(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	mutex_unlock(&opp->kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) #define ATTR_SET		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) #define ATTR_GET		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	if (addr & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	spin_lock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (type == ATTR_SET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		ret = kvm_mpic_write_internal(opp, addr, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		ret = kvm_mpic_read_internal(opp, addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	spin_unlock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	struct openpic *opp = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	u32 attr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	case KVM_DEV_MPIC_GRP_MISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		case KVM_DEV_MPIC_BASE_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			return set_base_addr(opp, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	case KVM_DEV_MPIC_GRP_REGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		if (get_user(attr32, (u32 __user *)(long)attr->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		return access_reg(opp, attr->attr, &attr32, ATTR_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		if (attr->attr > MAX_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		if (get_user(attr32, (u32 __user *)(long)attr->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		if (attr32 != 0 && attr32 != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		spin_lock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		openpic_set_irq(opp, attr->attr, attr32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		spin_unlock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	struct openpic *opp = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	u64 attr64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	u32 attr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	case KVM_DEV_MPIC_GRP_MISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		case KVM_DEV_MPIC_BASE_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 			mutex_lock(&opp->kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 			attr64 = opp->reg_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			mutex_unlock(&opp->kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			if (copy_to_user((u64 __user *)(long)attr->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 					 &attr64, sizeof(u64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	case KVM_DEV_MPIC_GRP_REGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		if (put_user(attr32, (u32 __user *)(long)attr->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		if (attr->attr > MAX_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		spin_lock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		attr32 = opp->src[attr->attr].pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		spin_unlock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		if (put_user(attr32, (u32 __user *)(long)attr->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	case KVM_DEV_MPIC_GRP_MISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		case KVM_DEV_MPIC_BASE_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	case KVM_DEV_MPIC_GRP_REGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		if (attr->attr > MAX_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
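/*
 * .destroy hook: detach the MPIC from the VM and free both the emulation
 * state and the kvm_device wrapper (the hook owns @dev).
 */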
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static void mpic_destroy(struct kvm_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	struct openpic *opp = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	dev->kvm->arch.mpic = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	kfree(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
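/*
 * Called once from mpic_create(): install an empty GSI routing table so
 * consumers of the routing map always see a valid (if empty) table until
 * userspace programs real routes via KVM_SET_GSI_ROUTING.
 */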
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static int mpic_set_default_irq_routing(struct openpic *opp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	struct kvm_irq_routing_entry *routing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	/* Create a nop default map, so that dereferencing it still works */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	if (!routing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	kvm_set_irq_routing(opp->kvm, routing, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	kfree(routing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
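/*
 * .create hook: allocate and initialise the per-VM emulation state.
 * Registers the global/timer/source/per-CPU MMIO sub-regions, applies the
 * per-model defaults (FSL MPIC 2.0 vs 4.2, -ENODEV for anything else),
 * installs the default empty IRQ routing and resets the chip, and only then
 * publishes the pointer through kvm->arch.mpic, with smp_wmb() ordering the
 * initialisation stores against that publish.  Only one MPIC per VM is
 * supported, so a second create fails with -EINVAL.
 */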
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) static int mpic_create(struct kvm_device *dev, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	struct openpic *opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	/* We only support one MPIC at a time for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	if (dev->kvm->arch.mpic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	if (!opp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	dev->private = opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	opp->kvm = dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	opp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	opp->model = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	spin_lock_init(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	add_mmio_region(opp, &openpic_gbl_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	add_mmio_region(opp, &openpic_tmr_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	add_mmio_region(opp, &openpic_src_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	add_mmio_region(opp, &openpic_cpu_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	switch (opp->model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	case KVM_DEV_TYPE_FSL_MPIC_20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		opp->fsl = &fsl_mpic_20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		opp->brr1 = 0x00400200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		opp->flags |= OPENPIC_FLAG_IDR_CRIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		opp->nb_irqs = 80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		opp->mpic_mode_mask = GCR_MODE_MIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		fsl_common_init(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	case KVM_DEV_TYPE_FSL_MPIC_42:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		opp->fsl = &fsl_mpic_42;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		opp->brr1 = 0x00400402;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		opp->flags |= OPENPIC_FLAG_ILR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		opp->nb_irqs = 196;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		opp->mpic_mode_mask = GCR_MODE_PROXY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		fsl_common_init(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	ret = mpic_set_default_irq_routing(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	openpic_reset(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	dev->kvm->arch.mpic = opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	kfree(opp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) struct kvm_device_ops kvm_mpic_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	.name = "kvm-mpic",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	.create = mpic_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	.destroy = mpic_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	.set_attr = mpic_set_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	.get_attr = mpic_get_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	.has_attr = mpic_has_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) };
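/*
 * Illustrative (untested) sketch of the userspace side: a VMM creates the
 * in-kernel MPIC with KVM_CREATE_DEVICE and then programs its base address
 * through the KVM_DEV_MPIC_GRP_MISC / KVM_DEV_MPIC_BASE_ADDR attribute.
 * vm_fd, the helper name and the 0xe0000000 base are assumed example
 * values, not anything mandated by this file.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int create_mpic(int vm_fd)
 *	{
 *		struct kvm_create_device cd = {
 *			.type = KVM_DEV_TYPE_FSL_MPIC_20,
 *		};
 *		__u64 base = 0xe0000000;
 *		struct kvm_device_attr attr = {
 *			.group = KVM_DEV_MPIC_GRP_MISC,
 *			.attr  = KVM_DEV_MPIC_BASE_ADDR,
 *			.addr  = (__u64)(unsigned long)&base,
 *		};
 *
 *		if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
 *			return -1;
 *		if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *			return -1;
 *		return cd.fd;
 *	}
 */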
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
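/*
 * Wire a vcpu to this MPIC as CPU number @cpu (reached when userspace
 * enables KVM_CAP_IRQ_MPIC on the vcpu).  Fails with -EPERM for a foreign
 * device/VM or an out-of-range CPU id, -EEXIST if the CPU slot is already
 * taken, and -EBUSY if the vcpu is already bound to an irqchip.  In proxy
 * mode the vcpu is also switched to in-kernel EPR handling.
 */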
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			     u32 cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	struct openpic *opp = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if (dev->ops != &kvm_mpic_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	if (opp->kvm != vcpu->kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	if (cpu >= MAX_CPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	spin_lock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	if (opp->dst[cpu].vcpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	if (vcpu->arch.irq_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	opp->dst[cpu].vcpu = vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	opp->nb_cpus = max(opp->nb_cpus, cpu + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	vcpu->arch.mpic = opp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	vcpu->arch.irq_cpu_id = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	/* This might need to be changed if GCR gets extended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	if (opp->mpic_mode_mask == GCR_MODE_PROXY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	spin_unlock_irq(&opp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)  * This should only happen immediately before the mpic is destroyed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  * so we shouldn't need to worry about anything still trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)  * access the vcpu pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
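/*
 * GSI routing callback for KVM_IRQ_ROUTING_IRQCHIP entries: forward the
 * requested line level to openpic_set_irq() under the device lock.
 */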
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  *  = 0   Interrupt was coalesced (previous irq is still pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  *  > 0   Number of CPUs interrupt was delivered to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			struct kvm *kvm, int irq_source_id, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	u32 irq = e->irqchip.pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	struct openpic *opp = kvm->arch.mpic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	spin_lock_irqsave(&opp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	openpic_set_irq(opp, irq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	spin_unlock_irqrestore(&opp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	/* None of the code paths we care about check the return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
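/*
 * GSI routing callback for MSI entries: deliver the MSI by writing its data
 * word to the MSIIR register of the single supported MSI bank.
 */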
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		struct kvm *kvm, int irq_source_id, int level, bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	struct openpic *opp = kvm->arch.mpic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	spin_lock_irqsave(&opp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	 * XXX We ignore the target address for now, as we only support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	 *     a single MSI bank.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	spin_unlock_irqrestore(&opp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	/* None of the code paths we care about check the return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
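/*
 * Translate one userspace kvm_irq_routing_entry into its in-kernel form.
 * Only IRQCHIP entries with a pin below KVM_IRQCHIP_NUM_PINS and MSI
 * entries are accepted; anything else is rejected with -EINVAL.
 *
 * Illustrative (untested) sketch of the userspace side, routing GSI 0 to
 * source pin 5 of irqchip 0 via KVM_SET_GSI_ROUTING on the VM fd; vm_fd
 * and the GSI/pin numbers are arbitrary example values:
 *
 *	size_t sz = sizeof(struct kvm_irq_routing) +
 *		    sizeof(struct kvm_irq_routing_entry);
 *	struct kvm_irq_routing *r = calloc(1, sz);
 *
 *	r->nr = 1;
 *	r->entries[0].gsi = 0;
 *	r->entries[0].type = KVM_IRQ_ROUTING_IRQCHIP;
 *	r->entries[0].u.irqchip.irqchip = 0;
 *	r->entries[0].u.irqchip.pin = 5;
 *	ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
 *	free(r);
 */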
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) int kvm_set_routing_entry(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 			  struct kvm_kernel_irq_routing_entry *e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			  const struct kvm_irq_routing_entry *ue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	switch (ue->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	case KVM_IRQ_ROUTING_IRQCHIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		e->set = mpic_set_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		e->irqchip.irqchip = ue->u.irqchip.irqchip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		e->irqchip.pin = ue->u.irqchip.pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	case KVM_IRQ_ROUTING_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		e->set = kvm_set_msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		e->msi.address_lo = ue->u.msi.address_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		e->msi.address_hi = ue->u.msi.address_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		e->msi.data = ue->u.msi.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }