Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
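
/*
 * Illustration (not from the original source): the real-mode idiom the
 * first point above refers to is the two-instruction stack switch
 *
 *	mov	ss, ax
 *	mov	sp, bx		; real hardware shields this from interrupts
 *
 * Under emulation, an interrupt or fault can land between the two
 * instructions and observe an inconsistent SS:SP pair.
 */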


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
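
/*
 * These aliases rely on x86 being little-endian: byte 0 of the ax slot is
 * AL and byte 1 is AH. IP/SP take only the low 16 bits, which is all that
 * exists in real mode.
 */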

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
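
/*
 * Bit for bit, SAFE_MASK (0xDD5) is OF|DF|TF|SF|ZF|AF|PF|CF: the flags a
 * vm86 guest may freely own. RETURN_MASK (0xDFF) additionally keeps the
 * low reserved bits but still excludes IF (bit 9), which get_vflags()
 * below synthesizes from the virtual VIF instead.
 */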

void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->user_vm86) {
		pr_alert("no user_vm86: BAD\n");
		do_exit(SIGSEGV);
	}
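	/*
	 * Fold the virtualized VIF and the CPU-type-specific flag bits
	 * back into the flags image that userspace will see.
	 */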
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!user_access_begin(user, vm86->vm86plus.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct)))
		goto Efault;

	unsafe_put_user(regs->pt.bx, &user->regs.ebx, Efault_end);
	unsafe_put_user(regs->pt.cx, &user->regs.ecx, Efault_end);
	unsafe_put_user(regs->pt.dx, &user->regs.edx, Efault_end);
	unsafe_put_user(regs->pt.si, &user->regs.esi, Efault_end);
	unsafe_put_user(regs->pt.di, &user->regs.edi, Efault_end);
	unsafe_put_user(regs->pt.bp, &user->regs.ebp, Efault_end);
	unsafe_put_user(regs->pt.ax, &user->regs.eax, Efault_end);
	unsafe_put_user(regs->pt.ip, &user->regs.eip, Efault_end);
	unsafe_put_user(regs->pt.cs, &user->regs.cs, Efault_end);
	unsafe_put_user(regs->pt.flags, &user->regs.eflags, Efault_end);
	unsafe_put_user(regs->pt.sp, &user->regs.esp, Efault_end);
	unsafe_put_user(regs->pt.ss, &user->regs.ss, Efault_end);
	unsafe_put_user(regs->es, &user->regs.es, Efault_end);
	unsafe_put_user(regs->ds, &user->regs.ds, Efault_end);
	unsafe_put_user(regs->fs, &user->regs.fs, Efault_end);
	unsafe_put_user(regs->gs, &user->regs.gs, Efault_end);
	unsafe_put_user(vm86->screen_bitmap, &user->screen_bitmap, Efault_end);

	user_access_end();

	preempt_disable();
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	update_task_stack(tsk);
	refresh_sysenter_cs(&tsk->thread);
	vm86->saved_sp0 = 0;
	preempt_enable();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
	return;

Efault_end:
	user_access_end();
Efault:
	pr_alert("could not access userspace vm86 info\n");
	do_exit(SIGSEGV);
}

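/*
 * Write-protect the 32 pages (128 KiB) of the legacy VGA window at
 * 0xA0000-0xBFFFF, so that guest writes to video memory fault and the
 * dirtied pages can be recorded in the screen bitmap.
 */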
static void mark_screen_rdonly(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	mmap_write_lock(mm);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	p4d = p4d_offset(pgd, 0xA0000);
	if (p4d_none_or_clear_bad(p4d))
		goto out;
	pud = pud_offset(p4d, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);

	if (pmd_trans_huge(*pmd)) {
		vma = find_vma(mm, 0xA0000);
		split_huge_pmd(vma, pmd, 0xA0000);
	}
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}


SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 *  from access_ok(), because the subfunction is
		 *  interpreted as (invalid) address to vm86_struct.
		 *  So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
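
/*
 * Userspace sketch (an illustration, not part of this file): a DOS
 * emulator typically maps the low 1 MiB itself, fills in a
 * struct vm86plus_struct, and enters v86 mode with something like
 *
 *	struct vm86plus_struct v = { 0 };
 *	v.regs.cs  = 0x0000;
 *	v.regs.eip = 0x7c00;
 *	v.regs.ss  = 0x0000;
 *	v.regs.esp = 0x7000;
 *	ret = syscall(SYS_vm86, VM86_ENTER, &v);
 *
 * and then inspects VM86_TYPE(ret) / VM86_ARG(ret) to see why the
 * kernel bounced back (VM86_INTx, VM86_STI, VM86_UNKNOWN, ...).
 */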


static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;
	struct vm86_struct v;

	err = security_mmap_addr(0);
	if (err) {
		/*
		 * vm86 cannot virtualize the address space, so vm86 users
		 * need to manage the low 1MB themselves using mmap.  Given
		 * that BIOS places important data in the first page, vm86
		 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
		 * for example, won't even bother trying to use vm86 if it
		 * can't map a page at virtual address 0.
		 *
		 * To reduce the available kernel attack surface, simply
		 * disallow vm86(old) for users who cannot mmap at va 0.
		 *
		 * The implementation of security_mmap_addr will allow
		 * suitably privileged users to map va 0 even if
		 * vm.mmap_min_addr is set above 0, and we want this
		 * behavior for vm86 as well, as it ensures that legacy
		 * tools like vbetool will not fail just because of
		 * vm.mmap_min_addr.
		 */
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d).  Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (copy_from_user(&v, user_vm86,
			offsetof(struct vm86_struct, int_revectored)))
		return -EFAULT;

	memset(&vm86regs, 0, sizeof(vm86regs));

	vm86regs.pt.bx = v.regs.ebx;
	vm86regs.pt.cx = v.regs.ecx;
	vm86regs.pt.dx = v.regs.edx;
	vm86regs.pt.si = v.regs.esi;
	vm86regs.pt.di = v.regs.edi;
	vm86regs.pt.bp = v.regs.ebp;
	vm86regs.pt.ax = v.regs.eax;
	vm86regs.pt.ip = v.regs.eip;
	vm86regs.pt.cs = v.regs.cs;
	vm86regs.pt.flags = v.regs.eflags;
	vm86regs.pt.sp = v.regs.esp;
	vm86regs.pt.ss = v.regs.ss;
	vm86regs.es = v.regs.es;
	vm86regs.ds = v.regs.ds;
	vm86regs.fs = v.regs.fs;
	vm86regs.gs = v.regs.gs;

	vm86->flags = v.flags;
	vm86->screen_bitmap = v.screen_bitmap;
	vm86->cpu_type = v.cpu_type;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

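	/*
	 * veflags_mask selects which extra EFLAGS bits the emulated CPU
	 * type exposes to the guest; the mask widens with each generation
	 * (NT/IOPL, then AC, then ID).
	 */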
	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state
 */
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);

	/* make room for real-mode segments */
	preempt_disable();
	tsk->thread.sp0 += 16;

	if (boot_cpu_has(X86_FEATURE_SEP)) {
		tsk->thread.sysenter_cs = 0;
		refresh_sysenter_cs(&tsk->thread);
	}

	update_task_stack(tsk);
	preempt_enable();

	if (vm86->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	return test_bit(nr, bitmap->__map);
}

#define val_byte(val, n) (((__u8 *)&val)[n])

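/*
 * The push/pop helpers below access the guest stack one byte at a time so
 * that the 16-bit offset in 'ptr' wraps within the segment the way a real
 * 8086 stack pointer would, and so that a fault on any byte takes the
 * err_label path.
 */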
#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
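	/* fetch CS:IP for vector i from the real-mode IVT at linear address 0 */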
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;

check_vip:
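	/*
	 * VIF and VIP both set means the guest has just enabled virtual
	 * interrupts while the monitor has one pending: return to
	 * userspace so it can deliver the interrupt.
	 */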
	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
	    if (vm86_irqs[i].tsk == task)
		free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}