^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * <benh@kernel.crashing.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #ifndef _ASM_POWERPC_DCR_NATIVE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define _ASM_POWERPC_DCR_NATIVE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #ifdef __KERNEL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/cputable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/cpu_has_feature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/stringify.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
/* Host handle for native DCR access: just the base DCR number to which
 * per-register offsets are added by dcr_read/write_native(). */
typedef struct {
	unsigned int base;
} dcr_host_native_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static inline bool dcr_map_ok_native(dcr_host_native_t host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
/* Map/unmap are trivial for native access: "mapping" merely records the
 * base DCR number and unmap has nothing to tear down. */
#define dcr_map_native(dev, dcr_n, dcr_c) \
	((dcr_host_native_t){ .base = (dcr_n) })
#define dcr_unmap_native(host, dcr_c) do {} while (0)
/* Read/write a DCR relative to the mapped base.  NOTE(review): dcr_n and
 * host expand unparenthesized; pass simple expressions only. */
#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base)
#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
/* Table based DCR accessors, defined out of line elsewhere.  These are
 * the fallback path used by mfdcr()/mtdcr() when the DCR number is not
 * a compile-time constant and the CPU lacks indexed DCR instructions. */
extern void __mtdcr(unsigned int reg, unsigned int val);
extern unsigned int __mfdcr(unsigned int reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
/* mfdcrx/mtdcrx instruction based accessors.  We hand code
 * the opcodes in order not to depend on newer binutils.
 */
static inline unsigned int mfdcrx(unsigned int reg)
{
	unsigned int ret;
	/*
	 * 0x7c000206 is the opcode template for "mfdcrx RT, RA"; the
	 * operand numbers are OR'd into the RT (shift 21) and RA
	 * (shift 16) fields.  NOTE(review): this relies on %0/%1
	 * expanding to bare GPR numbers (no "r" prefix) — confirm the
	 * assembler options keep it that way.
	 */
	asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
		     : "=r" (ret) : "r" (reg));
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/* Write @val to the DCR whose number is in @reg, via a hand-encoded
 * "mtdcrx" (opcode template 0x7c000306); see mfdcrx() above for why the
 * instruction is emitted as a .long. */
static inline void mtdcrx(unsigned int reg, unsigned int val)
{
	asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
		     : : "r" (val), "r" (reg));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/*
 * mfdcr(rn) - read DCR number @rn, picking the best access method:
 *  - compile-time constant below 1024: direct "mfdcr" with the DCR
 *    number as an instruction immediate ("n" constraint);
 *  - CPU has CPU_FTR_INDEXED_DCR: indexed access via mfdcrx();
 *  - otherwise: out-of-line table based __mfdcr().
 */
#define mfdcr(rn)						\
	({unsigned int rval;					\
	if (__builtin_constant_p(rn) && rn < 1024)		\
		asm volatile("mfdcr %0, %1" : "=r" (rval)	\
			      : "n" (rn));			\
	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
		rval = mfdcrx(rn);				\
	else							\
		rval = __mfdcr(rn);				\
	rval;})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
/*
 * mtdcr(rn, v) - write @v to DCR number @rn.  Mirrors mfdcr(): direct
 * "mtdcr" when @rn is a small compile-time constant, mtdcrx() on CPUs
 * with indexed DCR support, and the table based __mtdcr() otherwise.
 */
#define mtdcr(rn, v)						\
do {								\
	if (__builtin_constant_p(rn) && rn < 1024)		\
		asm volatile("mtdcr %0, %1"			\
			      : : "n" (rn), "r" (v));		\
	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
		mtdcrx(rn, v);					\
	else							\
		__mtdcr(rn, v);					\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
/* R/W of indirect DCRs make use of standard naming conventions for DCRs.
 * This lock serializes all indirect (address/data register pair)
 * accesses so an address write and its data access stay paired. */
extern spinlock_t dcr_ind_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) spin_lock_irqsave(&dcr_ind_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) mtdcrx(base_addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) val = mfdcrx(base_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) __mtdcr(base_addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) val = __mfdcr(base_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) spin_unlock_irqrestore(&dcr_ind_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) static inline void __mtdcri(int base_addr, int base_data, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) unsigned val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) spin_lock_irqsave(&dcr_ind_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) mtdcrx(base_addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) mtdcrx(base_data, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) __mtdcr(base_addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) __mtdcr(base_data, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) spin_unlock_irqrestore(&dcr_ind_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) static inline void __dcri_clrset(int base_addr, int base_data, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) unsigned clr, unsigned set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) spin_lock_irqsave(&dcr_ind_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) mtdcrx(base_addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) val = (mfdcrx(base_data) & ~clr) | set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) mtdcrx(base_data, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) __mtdcr(base_addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) val = (__mfdcr(base_data) & ~clr) | set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) __mtdcr(base_data, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) spin_unlock_irqrestore(&dcr_ind_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
/* Convenience wrappers for blocks that follow the standard DCRN_*
 * naming convention: mfdcri(FOO, reg) accesses register @reg indirectly
 * through DCRN_FOO_CONFIG_ADDR / DCRN_FOO_CONFIG_DATA. */
#define mfdcri(base, reg) __mfdcri(DCRN_ ## base ## _CONFIG_ADDR, \
				   DCRN_ ## base ## _CONFIG_DATA, \
				   reg)

#define mtdcri(base, reg, data) __mtdcri(DCRN_ ## base ## _CONFIG_ADDR, \
					 DCRN_ ## base ## _CONFIG_DATA, \
					 reg, data)

/* Atomically clear @clr then set @set in the indirectly-addressed
 * register @reg of block @base (see __dcri_clrset()). */
#define dcri_clrset(base, reg, clr, set) __dcri_clrset(DCRN_ ## base ## _CONFIG_ADDR, \
						       DCRN_ ## base ## _CONFIG_DATA, \
						       reg, clr, set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #endif /* __ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #endif /* __KERNEL__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #endif /* _ASM_POWERPC_DCR_NATIVE_H */