^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef _ASM_X86_VM86_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define _ASM_X86_VM86_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <uapi/asm/vm86.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * mode - the main change is that the old segment descriptors aren't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * useful any more and are forced to be zero by the kernel (and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * hardware when a trap occurs), and the real segment descriptors are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * at the end of the structure. Look at ptrace.h to see the "normal"
 * setup. For the user-space layout see 'struct vm86_regs' in <uapi/asm/vm86.h>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
struct kernel_vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
	struct pt_regs pt;
/*
 * these are specific to v86 mode:
 * Each real-mode segment selector is 16 bits; the __Xsh member after
 * each one pads the pair out to a full 32-bit stack slot so the layout
 * matches what the entry code pushes.
 */
	unsigned short es, __esh;	/* ES + padding */
	unsigned short ds, __dsh;	/* DS + padding */
	unsigned short fs, __fsh;	/* FS + padding */
	unsigned short gs, __gsh;	/* GS + padding */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
/*
 * Per-task vm86 state, allocated on first use and freed via free_vm86()
 * below.  The tail fields (flags .. vm86plus) shadow the members of the
 * user-supplied vm86plus_struct.
 */
struct vm86 {
	struct vm86plus_struct __user *user_vm86; /* userspace state to sync with */
	struct pt_regs regs32;		/* NOTE(review): presumably the 32-bit regs
					 * saved on vm86 entry -- confirm in vm86_32.c */
	unsigned long veflags;		/* virtual EFLAGS -- assumed; verify */
	unsigned long veflags_mask;	/* which EFLAGS bits are virtualized -- assumed */
	unsigned long saved_sp0;	/* sp0 to restore on exit -- assumed; verify */

	/* Mirrors of the corresponding uapi vm86plus_struct fields: */
	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #ifdef CONFIG_VM86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
/*
 * Entry points implemented by the vm86 core (bodies live elsewhere;
 * the trailing int/long arguments carry trap number / error code per
 * the implementation -- see the definitions for exact semantics).
 */
void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
void save_v86_state(struct kernel_vm86_regs *, int);

/* Forward declaration so release_vm86_irqs() below can take a task pointer. */
struct task_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
/*
 * Free a task's lazily-allocated vm86 state and clear the pointer so a
 * stale reference can never be used or double-freed.  Multi-statement
 * macro, hence the do/while(0) wrapper; the local avoids evaluating
 * the argument more than once.
 */
#define free_vm86(t) do {				\
	struct thread_struct *_thr = (t);		\
	if (_thr->vm86) {				\
		kfree(_thr->vm86);			\
		_thr->vm86 = NULL;			\
	}						\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
/*
 * Support for VM86 programs to request interrupts for
 * real mode hardware drivers:
 */
#define FIRST_VM86_IRQ		 3
#define LAST_VM86_IRQ		15

/* An IRQ is usable by vm86 only inside [FIRST_VM86_IRQ, LAST_VM86_IRQ]. */
static inline int invalid_vm86_irq(int irq)
{
	return !(irq >= FIRST_VM86_IRQ && irq <= LAST_VM86_IRQ);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
/* Release the vm86 IRQs associated with a task (name-based; see definition). */
void release_vm86_irqs(struct task_struct *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
/*
 * !CONFIG_VM86 stubs: these expand to nothing, so the arguments are
 * never evaluated -- callers must not rely on argument side effects.
 */
#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
/* !CONFIG_VM86 stub: no vm86 mode exists, so nothing is ever handled here. */
static inline int handle_vm86_trap(struct kernel_vm86_regs *regs,
				   long error_code, int trapno)
{
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
/* !CONFIG_VM86 stub: there is never any vm86 state to save. */
static inline void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
/* !CONFIG_VM86 stub: no vm86 state is ever allocated, so nothing to free. */
#define free_vm86(t) do { } while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #endif /* CONFIG_VM86 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #endif /* _ASM_X86_VM86_H */