/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/*
 * A "disabled" interrupt should never fire; to help catch problems,
 * we set its logical number to this value.
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)
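
/*
 * Illustrative sketch only, not code from this driver: per the comment
 * above, the core can "park" a shut-down source by programming
 * XIVE_BAD_IRQ as its logical number (paired here with an assumed
 * masking priority of 0xff), so a spurious fire is easy to spot:
 *
 *	xive_ops->configure_irq(hw_irq, target, 0xff, XIVE_BAD_IRQ);
 */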

/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one queue is currently populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR */
	u8 cppr;
};
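
/*
 * Illustrative sketch only, not code from this driver: since each set
 * bit of pending_prio marks a priority that may have work, an event
 * loop can scan from the lowest set bit, i.e. the most favoured
 * priority, along these lines:
 *
 *	while (xc->pending_prio) {
 *		u8 prio = ffs(xc->pending_prio) - 1;
 *		// scan xc->queue[prio]; clear the bit once the
 *		// queue turns out to be empty
 *	}
 */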

/* Backend ops */
struct xive_ops {
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
				  u32 *sw_irq);
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	int	(*debug_show)(struct seq_file *m, void *private);
	const char *name;
};

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
int xive_core_debug_init(void);
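
/*
 * Illustrative sketch only, not code from this driver: a backend fills
 * in a struct xive_ops and hands it to xive_core_init() along with its
 * mapped TIMA and the ring offset it operates at (e.g. TM_QW1_OS).
 * Every "my_*" name below is hypothetical:
 *
 *	static const struct xive_ops my_ops = {
 *		.populate_irq_data	= my_populate_irq_data,
 *		.configure_irq		= my_configure_irq,
 *		.setup_queue		= my_setup_queue,
 *		.name			= "my-backend",
 *	};
 *
 *	if (!xive_core_init(&my_ops, tima, TM_QW1_OS, max_prio))
 *		return false;
 */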

static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}
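
/*
 * Illustrative sketch only, not code from this driver: a queue of
 * (1 << queue_shift) bytes spans (1 << xive_alloc_order(queue_shift))
 * contiguous pages, so the allocation backing xive_queue_page_alloc()
 * can look like:
 *
 *	struct page *pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL,
 *					      xive_alloc_order(queue_shift));
 */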

extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */