^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef _ASM_IA64_ASMMACRO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define _ASM_IA64_ASMMACRO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/*
 * ENTRY/END bracket a hand-written assembly procedure.  .proc/.endp
 * delimit the routine for the assembler so it can emit unwind
 * information for it; the label itself is the entry point.
 * .align 32 places the entry on a 32-byte boundary (a pair of
 * 16-byte ia64 instruction bundles).
 */
#define ENTRY(name)				\
	.align 32;				\
	.proc name;				\
name:

/* Like ENTRY, but with only 16-byte (single-bundle) alignment. */
#define ENTRY_MIN_ALIGN(name)			\
	.align 16;				\
	.proc name;				\
name:

/* ENTRY plus .global, for procedures visible outside this file. */
#define GLOBAL_ENTRY(name)			\
	.global name;				\
	ENTRY(name)

/* Close a procedure opened with ENTRY/ENTRY_MIN_ALIGN/GLOBAL_ENTRY. */
#define END(name)				\
	.endp name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Helper macros to make unwind directives more readable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
/*
 * prologue_gr: bit masks naming which standard values the procedure
 * prologue saves, for use in .prologue unwind directives.
 * ASM_UNW_PRLG_GRSAVE(ninputs) yields the first stacked general
 * register after the 'ninputs' incoming arguments (stacked registers
 * begin at r32), i.e. the first GR available to hold saved state.
 */
#define ASM_UNW_PRLG_RP			0x8	/* prologue saves rp (return pointer) */
#define ASM_UNW_PRLG_PFS		0x4	/* prologue saves ar.pfs */
#define ASM_UNW_PRLG_PSP		0x2	/* prologue saves psp (memory stack ptr) */
#define ASM_UNW_PRLG_PR			0x1	/* prologue saves predicate registers */
#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * Helper macros for accessing user memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * When adding any new .section/.previous entries here, make sure to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * unpleasant things will happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
	/*
	 * Pre-declare __ex_table once so that subsequent .xdata4
	 * references inherit these attributes ("a" = allocatable).
	 */
	.section "__ex_table", "a"		// declare section & section attributes
	.previous

/*
 * EX(y, x...): execute instruction(s) 'x' with an exception-table
 * entry mapping a fault at the [99:] tag to fixup address 'y'.
 * Both fields are emitted as 32-bit self-relative offsets (.xdata4).
 */
# define EX(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.;	\
	[99:] x
/*
 * EXCLR is EX with the low bits of the fixup offset biased by +4;
 * NOTE(review): presumably this tags the entry so the fault handler
 * also clears the instruction's destination register — confirm
 * against the ia64 exception-fixup code.
 */
# define EXCLR(y,x...)				\
	.xdata4 "__ex_table", 99f-., y-.+4;	\
	[99:] x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Tag MCA recoverable instruction ranges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
	/* Pre-declare __mca_table ("a" = allocatable). */
	.section "__mca_table", "a"		// declare section & section attributes
	.previous

/*
 * MCA_RECOVER_RANGE(y): record the instruction range from label 'y'
 * up to the [99:] tag in __mca_table, marking it as recoverable from
 * a Machine Check Abort.  Offsets are 32-bit self-relative.
 */
# define MCA_RECOVER_RANGE(y)			\
	.xdata4 "__mca_table", y-., 99f-.;	\
	[99:]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * Mark instructions that need a load of a virtual address patched to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * a load of a physical address. We use this either in critical performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * path (ivt.S - TLB miss processing) or in places where it might not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * safe to use a "tpa" instruction (mca_asm.S - error recovery).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) */
	.section ".data..patch.vtop", "a"	// declare section & section attributes
	.previous

/*
 * LOAD_PHYSICAL(pr, reg, obj): under predicate 'pr', load the address
 * of 'obj' into 'reg' via movl, and record the bundle's location in
 * .data..patch.vtop (32-bit self-relative) so patching code can
 * rewrite the immediate from a virtual to a physical address.
 */
#define	LOAD_PHYSICAL(pr, reg, obj)		\
[1:](pr)movl reg = obj;				\
	.xdata4 ".data..patch.vtop", 1b-.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) */
/* Always emit the work-around; it is patched out on unaffected CPUs. */
#define DO_MCKINLEY_E9_WORKAROUND

#ifdef DO_MCKINLEY_E9_WORKAROUND
	.section ".data..patch.mckinley_e9", "a"
	.previous
/* workaround for Itanium 2 Errata 9: */
/*
 * FSYS_RETURN: return sequence with the E9 work-around.  Instead of a
 * direct br.ret, execute a br.call to the immediately following bundle
 * and then the real br.ret from there.  r16 preserves ar.pfs across
 * the extra br.call (which overwrites ar.pfs).  The first bundle's
 * address is recorded in .data..patch.mckinley_e9 so CPUs that do not
 * need the work-around get these bundles patched at boot.
 */
# define FSYS_RETURN					\
	.xdata4 ".data..patch.mckinley_e9", 1f-.;	\
1:{ .mib;						\
	nop.m 0;					\
	mov r16=ar.pfs;					\
	br.call.sptk.many b7=2f;;			\
};							\
2:{ .mib;						\
	nop.m 0;					\
	mov ar.pfs=r16;					\
	br.ret.sptk.many b6;;				\
}
#else
/* No work-around needed: plain procedure return. */
# define FSYS_RETURN	br.ret.sptk.many b6
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * If physical stack register size is different from DEF_NUM_STACK_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * dynamically patch the kernel for correct size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) */
	.section ".data..patch.phys_stack_reg", "a"
	.previous
/*
 * LOAD_PHYS_STACK_REG_SIZE(reg): reg = IA64_NUM_PHYS_STACK_REG*8+8
 * (default physical stacked-register file size in bytes, plus 8).
 * The instruction's address is recorded in .data..patch.phys_stack_reg
 * so the immediate can be patched at boot when the CPU's actual
 * physical stacked-register count differs from the default.
 * NOTE(review): exact value semantics live in the patch code — confirm
 * there before relying on the +8.
 */
#define LOAD_PHYS_STACK_REG_SIZE(reg)			\
[1:]	adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0;	\
	.xdata4 ".data..patch.phys_stack_reg", 1b-.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * Up until early 2004, use of .align within a function caused bad unwind info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) */
#ifdef HAVE_WORKING_TEXT_ALIGN
/* Assembler handles .align inside a function without bad unwind info. */
# define TEXT_ALIGN(n)		.align n
#else
/* Old GAS: expand to nothing rather than risk corrupt unwind info. */
# define TEXT_ALIGN(n)
#endif

/*
 * Data/instruction serialization directives; expand to nothing when
 * the assembler does not support .serialize.*.
 */
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data		.serialize.data
# define dv_serialize_instruction	.serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #endif /* _ASM_IA64_ASMMACRO_H */