// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
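
/*
 * Illustrative example (not an entry from the real opcode tables): a
 * two-operand ALU instruction could be described as
 *
 *	DstMem | SrcReg | ModRM | Lock | Fastop
 *
 * i.e. destination decoded from the ModRM r/m field, source from the
 * ModRM reg field, lock prefix allowed, executed via the fastop path.
 * The decoder later recovers the operand types from ctxt->d by shifting
 * and masking, e.g.
 *
 *	((ctxt->d >> DstShift) & OpMask) == OpMem
 *	((ctxt->d >> SrcShift) & OpMask) == OpReg
 */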

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
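
/*
 * These repeat an initializer N times, e.g. X4(foo) expands to
 * "foo, foo, foo, foo"; the opcode tables use them to fill runs of
 * consecutive table slots with identical entries.
 */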

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}
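
/*
 * The three helpers above form a small lazy cache over the guest GPRs:
 * reg_read() pulls a register into _regs[] on first use and marks it
 * valid, reg_write() additionally marks it dirty so writeback_registers()
 * below flushes it back, and reg_rmw() combines both for read-modify-write
 * users.  A sketch of typical use (the real stack code also applies
 * stack_mask()):
 *
 *	ulong *preg = reg_rmw(ctxt, VCPU_REGS_RSP);
 *	*preg -= 2;
 */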

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
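
/*
 * Sketch of the size dispatch performed by fastop() (defined later in
 * this file): each table entry points at the byte-sized variant, and the
 * wider variants are emitted FASTOP_SIZE bytes apart in b/w/l/q order,
 * so, roughly,
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * selects the w/l/q body for 2/4/8-byte operands.
 */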

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op, dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
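
/*
 * For example, FASTOP2(add) emits an em_add symbol followed by four
 * FASTOP_SIZE-aligned bodies (the last on 64-bit kernels only):
 *
 *	addb %dl, %al; ret
 *	addw %dx, %ax; ret
 *	addl %edx, %eax; ret
 *	addq %rdx, %rax; ret
 *
 * each reachable from em_add by the offset arithmetic described above.
 */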

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
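
/*
 * The sixteen stubs above are emitted in condition-code order, each
 * padded to 4 bytes by the ".align 4" in FOP_SETCC, so the stub for a
 * given condition code can be reached by address arithmetic roughly as
 *
 *	void (*fop)(void) = (void *)em_setcc + 4 * (cc & 0xf);
 *
 * (a sketch of how test_cc() uses this table).
 */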

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     ".pushsection .fixup, \"ax\"\n" \
		     "3: movl $1, %[_fault]\n" \
		     "   jmp 2b\n" \
		     ".popsection\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
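
/*
 * Typical use: wrap a possibly-faulting instruction and turn the fault
 * into an emulator return code instead of a host oops, e.g.
 *
 *	rc = asm_safe("fwait");
 *
 * as the FPU emulation paths later in this file do.
 */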

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
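
/*
 * Examples: ad_mask() is 0xffff for 2-byte addressing; a 32-bit stack
 * segment (ss.d == 1) yields stack_mask() == 0xffffffff and
 * stack_size() == 4, while 64-bit mode yields ~0UL and 8.
 */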

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
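
/*
 * E.g. a descriptor with the granularity bit set (g == 1) and a raw
 * limit of 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff,
 * i.e. a full 4GiB segment.
 */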

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
}
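
/*
 * E.g. with 48 virtual-address bits a canonical address sign-extends
 * bit 47 through bit 63, so 0xffff800000000000 is canonical while
 * 0x0000800000000000 is not.
 */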

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
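
/*
 * E.g. a 16-byte MOVDQA access is tagged Aligned and must be aligned to
 * its full size (16 here), MOVDQU is tagged Unaligned and gets no
 * alignment check, and FXSAVE's 512-byte image is tagged Aligned16 so
 * it only needs 16-byte alignment.
 */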
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) unsigned *max_size, unsigned size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) bool write, bool fetch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) enum x86emul_mode mode, ulong *linear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) struct desc_struct desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) bool usable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) ulong la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) u32 lim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) u16 sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) u8 va_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) la = seg_base(ctxt, addr.seg) + addr.ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) *max_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) case X86EMUL_MODE_PROT64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) *linear = la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) va_bits = ctxt_virt_addr_bits(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (get_canonical(la, va_bits) != la)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (size > *max_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) *linear = la = (u32)la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) addr.seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (!usable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /* code segment in protected mode or read-only data segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) || !(desc.type & 2)) && write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) /* unreadable code segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (!fetch && (desc.type & 8) && !(desc.type & 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) lim = desc_limit_scaled(&desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (!(desc.type & 8) && (desc.type & 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /* expand-down segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (addr.ea <= lim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) lim = desc.d ? 0xffffffff : 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (addr.ea > lim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (lim == 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) *max_size = ~0u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) *max_size = (u64)lim + 1 - addr.ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (size > *max_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (la & (insn_alignment(ctxt, size) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (addr.seg == VCPU_SREG_SS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) return emulate_ss(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) static int linearize(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) unsigned size, bool write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ulong *linear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) unsigned max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return __linearize(ctxt, addr, &max_size, size, write, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ctxt->mode, linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) enum x86emul_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ulong linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) unsigned max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct segmented_address addr = { .seg = VCPU_SREG_CS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) .ea = dst };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (ctxt->op_bytes != sizeof(unsigned long))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (rc == X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ctxt->_eip = addr.ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return assign_eip(ctxt, dst, ctxt->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
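/*
 * A far transfer may change the CPU mode: with EFER.LMA set, a code segment
 * with CS.L=1 runs in 64-bit mode, otherwise CS.D selects between 16-bit and
 * 32-bit protected mode.  The new mode is committed only after the target RIP
 * has been validated, which is why assign_eip() is given the tentative mode.
 */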
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) const struct desc_struct *cs_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) enum x86emul_mode mode = ctxt->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (ctxt->mode >= X86EMUL_MODE_PROT16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (cs_desc->l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) u64 efer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (efer & EFER_LMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) mode = X86EMUL_MODE_PROT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) mode = X86EMUL_MODE_PROT32; /* temporary value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) rc = assign_eip(ctxt, dst, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (rc == X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ctxt->mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return assign_eip_near(ctxt, ctxt->_eip + rel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) void *data, unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static int linear_write_system(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ulong linear, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ulong linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) rc = linearize(ctxt, addr, size, false, &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ulong linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) rc = linearize(ctxt, addr, size, true, &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)  * Prefetch the remaining bytes of the instruction without crossing a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)  * boundary if they are not in the fetch_cache yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) unsigned size, max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) unsigned long linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int cur_size = ctxt->fetch.end - ctxt->fetch.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct segmented_address addr = { .seg = VCPU_SREG_CS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) .ea = ctxt->eip + cur_size };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * We do not know exactly how many bytes will be needed, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * __linearize is expensive, so fetch as much as possible. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)  * just have to avoid going beyond the 15-byte limit, the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * of the segment, or the end of the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * __linearize is called with size 0 so that it does not do any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * boundary check itself. Instead, we use max_size to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * against op_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (unlikely(rc != X86EMUL_CONTINUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
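	/*
	 * Note: cur_size is never larger than 15, so "15UL ^ cur_size" is a
	 * branch-free spelling of 15 - cur_size (every set bit of cur_size
	 * lies within 0xf); e.g. cur_size == 6 gives 0xf ^ 0x6 == 9.
	 */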
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) size = min_t(unsigned, 15UL ^ cur_size, max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 	 * One instruction can only straddle two pages, and the first of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 	 * them has already been loaded at the beginning of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	 * x86_decode_insn.  So, if we still do not have enough bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 	 * we must have hit the 15-byte instruction length limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (unlikely(size < op_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) size, &ctxt->exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (unlikely(rc != X86EMUL_CONTINUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ctxt->fetch.end += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (unlikely(done_size < size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return __do_insn_fetch_bytes(ctxt, size - done_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /* Fetch the next part of the instruction being emulated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) #define insn_fetch(_type, _ctxt) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ({ _type _x; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (rc != X86EMUL_CONTINUE) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) goto done; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) ctxt->_eip += sizeof(_type); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ctxt->fetch.ptr += sizeof(_type); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) _x; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) })
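/*
 * Both fetch macros rely on an "int rc" variable and a "done:" label being
 * in scope in the caller; a failed fetch jumps there with rc set.  Typical
 * usage, as in decode_modrm() below:
 *
 *	sib = insn_fetch(u8, ctxt);
 */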
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) #define insn_fetch_arr(_arr, _size, _ctxt) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) ({ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) rc = do_insn_fetch_bytes(_ctxt, _size); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (rc != X86EMUL_CONTINUE) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) goto done; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ctxt->_eip += (_size); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) memcpy(_arr, ctxt->fetch.ptr, _size); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ctxt->fetch.ptr += (_size); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)  * Given the 'reg' portion of a ModRM byte, and a register block, return a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)  * pointer into the block that addresses the relevant register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)  * @byteop with no REX prefix selects the high-byte registers AH, CH, DH, BH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)  */
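/*
 * E.g. modrm_reg == 4 with byteop and no REX prefix returns a pointer one
 * byte into RAX, i.e. AH on little-endian x86.
 */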
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) int byteop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) p = reg_rmw(ctxt, modrm_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int read_descriptor(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) u16 *size, unsigned long *address, int op_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
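	/*
	 * With a 16-bit operand size, lgdt/lidt still load a 24-bit base
	 * address, so fetch three bytes of it instead of two.
	 */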
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (op_bytes == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) op_bytes = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) rc = segmented_read_std(ctxt, addr, size, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) addr.ea += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) rc = segmented_read_std(ctxt, addr, address, op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) FASTOP2(add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) FASTOP2(or);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) FASTOP2(adc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) FASTOP2(sbb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) FASTOP2(and);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) FASTOP2(sub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) FASTOP2(xor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) FASTOP2(cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) FASTOP2(test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) FASTOP1SRC2(mul, mul_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) FASTOP1SRC2(imul, imul_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) FASTOP1SRC2EX(div, div_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) FASTOP1SRC2EX(idiv, idiv_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) FASTOP3WCL(shld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) FASTOP3WCL(shrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) FASTOP2W(imul);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) FASTOP1(not);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) FASTOP1(neg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) FASTOP1(inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) FASTOP1(dec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) FASTOP2CL(rol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) FASTOP2CL(ror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) FASTOP2CL(rcl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) FASTOP2CL(rcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) FASTOP2CL(shl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) FASTOP2CL(shr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) FASTOP2CL(sar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) FASTOP2W(bsf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) FASTOP2W(bsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) FASTOP2W(bt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) FASTOP2W(bts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) FASTOP2W(btr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) FASTOP2W(btc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) FASTOP2(xadd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) FASTOP2R(cmp, cmp_r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	/* If src is zero, do not write back, but still update flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (ctxt->src.val == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return fastop(ctxt, em_bsf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	/* If src is zero, do not write back, but still update flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (ctxt->src.val == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return fastop(ctxt, em_bsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
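/*
 * Evaluate a condition code (the low nibble of a Jcc/SETcc/CMOVcc opcode)
 * against the guest's flags.  em_setcc is expected to be a table of 16
 * "setcc %al; ret" stubs, 4 bytes apart, one per condition code, defined
 * with the fastop machinery elsewhere in this file; test_cc() loads the
 * guest flags and calls the stub for the requested condition.
 */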
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u8 rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) asm("push %[flags]; popf; " CALL_NOSPEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static void fetch_register_operand(struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) switch (op->bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) op->val = *(u8 *)op->addr.reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) op->val = *(u16 *)op->addr.reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) op->val = *(u32 *)op->addr.reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) op->val = *(u64 *)op->addr.reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static void emulator_get_fpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) fpregs_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) fpregs_assert_state_consistent();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (test_thread_flag(TIF_NEED_FPU_LOAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) switch_fpu_return();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static void emulator_put_fpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) fpregs_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
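/*
 * The XMM/MM register number must be encoded into the instruction itself,
 * so the register cannot be selected at run time; hence one hard-coded
 * movdqa/movq per register below.
 */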
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static void read_sse_reg(sse128_t *data, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) default: BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static void write_sse_reg(sse128_t *data, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) default: BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static void read_mmx_reg(u64 *data, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) default: BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static void write_mmx_reg(u64 *data, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) default: BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static int em_fninit(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return emulate_nm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) asm volatile("fninit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) u16 fcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return emulate_nm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) asm volatile("fnstcw %0": "+m"(fcw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ctxt->dst.val = fcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) u16 fsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return emulate_nm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) asm volatile("fnstsw %0": "+m"(fsw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ctxt->dst.val = fsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) unsigned reg = ctxt->modrm_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (!(ctxt->d & ModRM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (ctxt->d & Sse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) op->type = OP_XMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) op->bytes = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) op->addr.xmm = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) read_sse_reg(&op->vec_val, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (ctxt->d & Mmx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) reg &= 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) op->type = OP_MM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) op->bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) op->addr.mm = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) op->type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) fetch_register_operand(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) op->orig_val = op->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ctxt->modrm_seg = VCPU_SREG_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static int decode_modrm(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) u8 sib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) int index_reg, base_reg, scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) ulong modrm_ea = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ctxt->modrm_seg = VCPU_SREG_DS;
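	/*
	 * E.g. modrm == 0xd8 with a REX prefix of 0x44 (REX.R set) decodes
	 * to mod == 3, reg == 8 | 3 == 11 (r11) and rm == 0 (rax).
	 */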
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) op->type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ctxt->d & ByteOp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (ctxt->d & Sse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) op->type = OP_XMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) op->bytes = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) op->addr.xmm = ctxt->modrm_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) read_sse_reg(&op->vec_val, ctxt->modrm_rm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (ctxt->d & Mmx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) op->type = OP_MM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) op->bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) op->addr.mm = ctxt->modrm_rm & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) fetch_register_operand(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) op->type = OP_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (ctxt->ad_bytes == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* 16-bit ModR/M decode. */
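		/*
		 * The eight 16-bit r/m encodings select fixed register
		 * pairs: 0 BX+SI, 1 BX+DI, 2 BP+SI, 3 BP+DI, 4 SI, 5 DI,
		 * 6 disp16 (or BP when mod != 0), 7 BX.
		 */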
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) switch (ctxt->modrm_mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (ctxt->modrm_rm == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) modrm_ea += insn_fetch(u16, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) modrm_ea += insn_fetch(s8, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) modrm_ea += insn_fetch(u16, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) switch (ctxt->modrm_rm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) modrm_ea += bx + si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) modrm_ea += bx + di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) modrm_ea += bp + si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) modrm_ea += bp + di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) modrm_ea += si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) modrm_ea += di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (ctxt->modrm_mod != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) modrm_ea += bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) modrm_ea += bx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) ctxt->modrm_seg = VCPU_SREG_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) modrm_ea = (u16)modrm_ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* 32/64-bit ModR/M decode. */
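		/*
		 * An r/m of 4 means a SIB byte follows: scale in bits 7:6,
		 * index in bits 5:3, base in bits 2:0, with index and base
		 * extended by REX.X/REX.B.  E.g. sib == 0x98 yields
		 * ea = rax + (rbx << 2).
		 */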
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if ((ctxt->modrm_rm & 7) == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) sib = insn_fetch(u8, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) index_reg |= (sib >> 3) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) base_reg |= sib & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) scale = sib >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) modrm_ea += insn_fetch(s32, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) modrm_ea += reg_read(ctxt, base_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) adjust_modrm_seg(ctxt, base_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* Increment ESP on POP [ESP] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if ((ctxt->d & IncSP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) base_reg == VCPU_REGS_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) modrm_ea += ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (index_reg != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) modrm_ea += reg_read(ctxt, index_reg) << scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) modrm_ea += insn_fetch(s32, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ctxt->rip_relative = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) base_reg = ctxt->modrm_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) modrm_ea += reg_read(ctxt, base_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) adjust_modrm_seg(ctxt, base_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) switch (ctxt->modrm_mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) modrm_ea += insn_fetch(s8, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) modrm_ea += insn_fetch(s32, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) op->addr.mem.ea = modrm_ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (ctxt->ad_bytes != 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static int decode_abs(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) op->type = OP_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) switch (ctxt->ad_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) op->addr.mem.ea = insn_fetch(u16, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) op->addr.mem.ea = insn_fetch(u32, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) op->addr.mem.ea = insn_fetch(u64, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
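/*
 * For bt/bts/btr/btc with a register bit offset, the (signed) offset can
 * address any bit relative to the memory operand.  The byte part of the
 * offset is folded into the effective address and only the bit-within-word
 * part is left in src.val: e.g. a 16-bit "bt [mem], reg" with reg == 17
 * adds 2 to the address and tests bit 1.
 */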
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) long sv = 0, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) mask = ~((long)ctxt->dst.bytes * 8 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (ctxt->src.bytes == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) sv = (s16)ctxt->src.val & (s16)mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) else if (ctxt->src.bytes == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) sv = (s32)ctxt->src.val & (s32)mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sv = (s64)ctxt->src.val & (s64)mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ctxt->dst.addr.mem.ea = address_mask(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ctxt->dst.addr.mem.ea + (sv >> 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	/* keep only the bit offset within the word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
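/*
 * Emulated reads are staged through ctxt->mem_read so that, if emulation
 * has to be restarted (e.g. after an exit to userspace), bytes that were
 * already read are replayed from the cache rather than re-fetched.
 */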
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static int read_emulated(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) unsigned long addr, void *dest, unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct read_cache *mc = &ctxt->mem_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (mc->pos < mc->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) goto read_cached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) WARN_ON((mc->end + size) >= sizeof(mc->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) &ctxt->exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) mc->end += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) read_cached:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) memcpy(dest, mc->data + mc->pos, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) mc->pos += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static int segmented_read(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ulong linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) rc = linearize(ctxt, addr, size, false, &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return read_emulated(ctxt, linear, data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int segmented_write(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) const void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ulong linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) rc = linearize(ctxt, addr, size, true, &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return ctxt->ops->write_emulated(ctxt, linear, data, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) &ctxt->exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct segmented_address addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) const void *orig_data, const void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ulong linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) rc = linearize(ctxt, addr, size, true, &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) size, &ctxt->exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
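/*
 * Batch rep-prefixed port input: on the first call, read up to a buffer's
 * worth of items from the port (bounded by the rep count and by the page
 * containing the destination), then hand out buffered data on subsequent
 * iterations.  Returns 1 on success and 0 when the port read cannot
 * complete yet.
 */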
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) unsigned int size, unsigned short port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) void *dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct read_cache *rc = &ctxt->io_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (rc->pos == rc->end) { /* refill the PIO read-ahead buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) unsigned int in_page, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) unsigned int count = ctxt->rep_prefix ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (n == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) n = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) rc->pos = rc->end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) rc->end = n * size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (ctxt->rep_prefix && (ctxt->d & String) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) !(ctxt->eflags & X86_EFLAGS_DF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ctxt->dst.data = rc->data + rc->pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) ctxt->dst.type = OP_MEM_STR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ctxt->dst.count = (rc->end - rc->pos) / size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) rc->pos = rc->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) memcpy(dest, rc->data + rc->pos, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) rc->pos += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) u16 index, struct desc_struct *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct desc_ptr dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ulong addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) ctxt->ops->get_idt(ctxt, &dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (dt.size < index * 8 + 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return emulate_gp(ctxt, index << 3 | 0x2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) addr = dt.address + index * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return linear_read_system(ctxt, addr, desc, sizeof(*desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
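/*
 * A segment selector is index[15:3], table indicator TI[2] (0 = GDT,
 * 1 = LDT) and RPL[1:0]; "selector & 1 << 2" below tests TI.
 */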
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) u16 selector, struct desc_ptr *dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) u32 base3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (selector & 1 << 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct desc_struct desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) u16 sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) memset(dt, 0, sizeof(*dt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (!ops->get_segment(ctxt, &sel, &desc, &base3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) VCPU_SREG_LDTR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ops->get_gdt(ctxt, dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) u16 selector, ulong *desc_addr_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct desc_ptr dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) u16 index = selector >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ulong addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) get_descriptor_table_ptr(ctxt, selector, &dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (dt.size < index * 8 + 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return emulate_gp(ctxt, selector & 0xfffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) addr = dt.address + index * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (addr >> 32 != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) u64 efer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (!(efer & EFER_LMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) addr &= (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) *desc_addr_p = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
/* allowed just for 8-byte segment descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u16 selector, struct desc_struct *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) ulong *desc_addr_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
/* allowed just for 8-byte segment descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) u16 selector, struct desc_struct *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ulong addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) rc = get_descriptor_ptr(ctxt, selector, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return linear_write_system(ctxt, addr, desc, sizeof(*desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) u16 selector, int seg, u8 cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) enum x86_transfer_type transfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct desc_struct *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) struct desc_struct seg_desc, old_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) u8 dpl, rpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) unsigned err_vec = GP_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) u32 err_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ulong desc_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) u16 dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) u32 base3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) memset(&seg_desc, 0, sizeof(seg_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (ctxt->mode == X86EMUL_MODE_REAL) {
		/*
		 * set real mode segment descriptor (keep limit etc. for
		 * unreal mode)
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) set_desc_base(&seg_desc, selector << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) goto load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /* VM86 needs a clean new segment descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) set_desc_base(&seg_desc, selector << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) set_desc_limit(&seg_desc, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) seg_desc.type = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) seg_desc.p = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) seg_desc.s = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) seg_desc.dpl = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) goto load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) rpl = selector & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) /* TR should be in GDT only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) /* NULL selector is not valid for TR, CS and (except for long mode) SS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (null_selector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (seg == VCPU_SREG_SS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * ctxt->ops->set_segment expects the CPL to be in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * SS.DPL, so fake an expand-up 32-bit data segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) seg_desc.type = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) seg_desc.p = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) seg_desc.s = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) seg_desc.dpl = cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) seg_desc.d = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) seg_desc.g = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) /* Skip all following checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) goto load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) err_code = selector & 0xfffc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) GP_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* can't load system descriptor into segment selector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (seg <= VCPU_SREG_GS && !seg_desc.s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (transfer == X86_TRANSFER_CALL_JMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) dpl = seg_desc.dpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) switch (seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or descriptor's DPL != CPL
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) case VCPU_SREG_CS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (!(seg_desc.type & 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (seg_desc.type & 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) /* conforming */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (dpl > cpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /* nonconforming */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (rpl > cpl || dpl != cpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
		/* in long mode, the D/B bit must be clear if the L bit is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (seg_desc.d && seg_desc.l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) u64 efer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (efer & EFER_LMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /* CS(RPL) <- CPL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) selector = (selector & 0xfffc) | cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) case VCPU_SREG_TR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (!seg_desc.p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) err_vec = NP_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
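		/*
		 * Mark the TSS busy (type 1 -> 3 or 9 -> 11) with a cmpxchg
		 * so that a concurrent change to the descriptor is detected
		 * rather than silently overwritten.
		 */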
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) old_desc = seg_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) seg_desc.type |= 2; /* busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) sizeof(seg_desc), &ctxt->exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) case VCPU_SREG_LDTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (seg_desc.s || seg_desc.type != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) default: /* DS, ES, FS, or GS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * segment is not a data or readable code segment or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * ((segment is a data or nonconforming code segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * and (both RPL and CPL > DPL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if ((seg_desc.type & 0xa) == 0x8 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) (((seg_desc.type & 0xc) != 0xc) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) (rpl > dpl && cpl > dpl)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (!seg_desc.p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) goto exception;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (seg_desc.s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /* mark segment as accessed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (!(seg_desc.type & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) seg_desc.type |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) ret = write_segment_descriptor(ctxt, selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) &seg_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
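		/*
		 * In long mode system descriptors are 16 bytes: the upper
		 * 32 bits of the base follow the legacy 8 bytes, and the
		 * combined base must be canonical.
		 */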
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) ((u64)base3 << 32), ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) load:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) *desc = seg_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return emulate_exception(ctxt, err_vec, err_code, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
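
/*
 * Illustrative sketch (hypothetical helper, not used by the emulator):
 * the default case above rejects execute-only code segments.  For S=1
 * descriptors, type bit 3 distinguishes code from data and bit 1 is
 * W (writable) for data or R (readable) for code, so a segment can be
 * loaded into DS/ES/FS/GS iff it is data or readable code.
 */
static inline bool example_loadable_into_data_sreg(u8 type)
{
	bool code = type & 0x8;		/* bit 3: code vs. data */
	bool readable = type & 0x2;	/* bit 1: R (code) / W (data) */

	return !code || readable;
}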
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) u16 selector, int seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) u8 cpl = ctxt->ops->cpl(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) * they can load it at CPL<3 (Intel's manual says only LSS can,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * but it's wrong).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * However, the Intel manual says that putting IST=1/DPL=3 in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * an interrupt gate will result in SS=3 (the AMD manual instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * says it doesn't), so allow SS=3 in __load_segment_descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * and only forbid it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (seg == VCPU_SREG_SS && selector == 3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return emulate_exception(ctxt, GP_VECTOR, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return __load_segment_descriptor(ctxt, selector, seg, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) X86_TRANSFER_NONE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static void write_register_operand(struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
	assign_register(op->addr.reg, op->val, op->bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) switch (op->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) case OP_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) write_register_operand(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) case OP_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (ctxt->lock_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return segmented_cmpxchg(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) op->addr.mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) &op->orig_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) &op->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) op->bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return segmented_write(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) op->addr.mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) &op->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) op->bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) case OP_MEM_STR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return segmented_write(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) op->addr.mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) op->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) op->bytes * op->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) case OP_XMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) write_sse_reg(&op->vec_val, op->addr.xmm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) case OP_MM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) write_mmx_reg(&op->mm_val, op->addr.mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) case OP_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) /* no writeback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct segmented_address addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
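	/* Decrement RSP first, then write through SS at the new stack top. */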
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) rsp_increment(ctxt, -bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) addr.seg = VCPU_SREG_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) return segmented_write(ctxt, addr, data, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) static int em_push(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static int emulate_pop(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) void *dest, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct segmented_address addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) addr.seg = VCPU_SREG_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) rc = segmented_read(ctxt, addr, dest, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) rsp_increment(ctxt, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) static int em_pop(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static int emulate_popf(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) void *dest, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) unsigned long val, change_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) int cpl = ctxt->ops->cpl(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) rc = emulate_pop(ctxt, &val, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) X86_EFLAGS_AC | X86_EFLAGS_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
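	/*
	 * IOPL can only be changed at CPL 0 and IF only when CPL <= IOPL;
	 * in VM86, POPF itself faults unless IOPL is 3.
	 */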
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) switch(ctxt->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) case X86EMUL_MODE_PROT64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) case X86EMUL_MODE_PROT32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) case X86EMUL_MODE_PROT16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (cpl == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) change_mask |= X86_EFLAGS_IOPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (cpl <= iopl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) change_mask |= X86_EFLAGS_IF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) case X86EMUL_MODE_VM86:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (iopl < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) change_mask |= X86_EFLAGS_IF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) default: /* real mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) *(unsigned long *)dest =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) (ctxt->eflags & ~change_mask) | (val & change_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) static int em_popf(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) ctxt->dst.type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) ctxt->dst.addr.reg = &ctxt->eflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) ctxt->dst.bytes = ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) static int em_enter(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) unsigned frame_size = ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) unsigned nesting_level = ctxt->src2.val & 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) ulong rbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (nesting_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) rbp = reg_read(ctxt, VCPU_REGS_RBP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) rc = push(ctxt, &rbp, stack_size(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) stack_mask(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) stack_mask(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
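
/*
 * For nesting level 0 (the only case handled above), ENTER is thus
 * equivalent to "push %rbp; mov %rsp, %rbp; sub $frame_size, %rsp".
 */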
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) static int em_leave(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) stack_mask(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) int seg = ctxt->src2.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ctxt->src.val = get_segment_selector(ctxt, seg);
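	/*
	 * A 32-bit PUSH of a segment register reserves a four-byte stack
	 * slot, but (matching most modern CPUs) only two bytes of it are
	 * actually written.
	 */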
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (ctxt->op_bytes == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) rsp_increment(ctxt, -2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) ctxt->op_bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) int seg = ctxt->src2.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) unsigned long selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) rc = emulate_pop(ctxt, &selector, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
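	/*
	 * Loading SS inhibits interrupts until the next instruction
	 * completes.  Only two bytes are read from the stack; a wider
	 * slot is simply skipped over below.
	 */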
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (ctxt->modrm_reg == VCPU_SREG_SS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (ctxt->op_bytes > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) rsp_increment(ctxt, ctxt->op_bytes - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) rc = load_segment_descriptor(ctxt, (u16)selector, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) static int em_pusha(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) int reg = VCPU_REGS_RAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
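	/*
	 * Push (E)AX, CX, DX, BX, the pre-PUSHA SP, then BP, SI and DI;
	 * the SP slot holds the value sampled before the first push.
	 */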
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) rc = em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) ++reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) static int em_pushf(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) return em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) static int em_popa(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) int reg = VCPU_REGS_RDI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
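	/* POPA discards the saved SP image rather than loading it. */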
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) while (reg >= VCPU_REGS_RAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (reg == VCPU_REGS_RSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) rsp_increment(ctxt, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) --reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) --reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) struct desc_ptr dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) gva_t cs_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) gva_t eip_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) u16 cs, eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* TODO: Add limit checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) ctxt->src.val = ctxt->eflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) rc = em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) rc = em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) ctxt->src.val = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) rc = em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) ops->get_idt(ctxt, &dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
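	/*
	 * Real-mode IVT entries are 4 bytes: IP in bytes 0-1, CS in
	 * bytes 2-3.  E.g. INT 0x10 reads IP from base+0x40 and CS
	 * from base+0x42.
	 */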
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) eip_addr = dt.address + (irq << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) cs_addr = dt.address + (irq << 2) + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) rc = linear_read_system(ctxt, cs_addr, &cs, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) rc = linear_read_system(ctxt, eip_addr, &eip, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) ctxt->_eip = eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) invalidate_registers(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) rc = __emulate_int_real(ctxt, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (rc == X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) writeback_registers(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) switch(ctxt->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) case X86EMUL_MODE_REAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) return __emulate_int_real(ctxt, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) case X86EMUL_MODE_VM86:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) case X86EMUL_MODE_PROT16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) case X86EMUL_MODE_PROT32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) case X86EMUL_MODE_PROT64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) default:
		/* Protected mode interrupts are not yet implemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) unsigned long temp_eip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) unsigned long temp_eflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) unsigned long cs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) X86_EFLAGS_AC | X86_EFLAGS_ID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) X86_EFLAGS_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) X86_EFLAGS_VIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) /* TODO: Add stack limit check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (temp_eip & ~0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) ctxt->_eip = temp_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
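	/*
	 * A 16-bit IRET replaces only the low word of EFLAGS; the 32-bit
	 * form writes all masked bits but preserves VM, VIF and VIP.
	 */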
	if (ctxt->op_bytes == 4) {
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	} else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) ctxt->eflags |= X86_EFLAGS_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) ctxt->ops->set_nmi_mask(ctxt, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) static int em_iret(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) switch(ctxt->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) case X86EMUL_MODE_REAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return emulate_iret_real(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) case X86EMUL_MODE_VM86:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) case X86EMUL_MODE_PROT16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) case X86EMUL_MODE_PROT32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) case X86EMUL_MODE_PROT64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) default:
		/* IRET from protected mode is not yet implemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) unsigned short sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) struct desc_struct new_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) u8 cpl = ctxt->ops->cpl(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
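	/* A far pointer is the offset first, then a 16-bit selector. */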
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) X86_TRANSFER_CALL_JMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) &new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) /* Error handling is not implemented. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return assign_eip_near(ctxt, ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) long int old_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) old_eip = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) rc = assign_eip_near(ctxt, ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) ctxt->src.val = old_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) rc = em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) u64 old = ctxt->dst.orig_val64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (ctxt->dst.bytes == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) ctxt->eflags &= ~X86_EFLAGS_ZF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) (u32) reg_read(ctxt, VCPU_REGS_RBX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) ctxt->eflags |= X86_EFLAGS_ZF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
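
/*
 * Illustrative model (hypothetical helper, not used by the emulator) of
 * the CMPXCHG8B semantics implemented above: compare EDX:EAX with the
 * 64-bit destination; on a match set ZF and store ECX:EBX, otherwise
 * clear ZF and load EDX:EAX from the destination.
 */
static inline bool example_cmpxchg8b(u64 *dst, u32 *eax, u32 *edx,
				     u32 ebx, u32 ecx)
{
	u64 expected = ((u64)*edx << 32) | *eax;

	if (*dst == expected) {
		*dst = ((u64)ecx << 32) | ebx;
		return true;			/* ZF = 1 */
	}
	*eax = (u32)*dst;
	*edx = (u32)(*dst >> 32);
	return false;				/* ZF = 0 */
}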
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) static int em_ret(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) unsigned long eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return assign_eip_near(ctxt, eip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) static int em_ret_far(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) unsigned long eip, cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int cpl = ctxt->ops->cpl(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) struct desc_struct new_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) /* Outer-privilege level return is not implemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) X86_TRANSFER_RET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) &new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) rc = assign_eip_far(ctxt, eip, &new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) /* Error handling is not implemented. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) rc = em_ret_far(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) return rc;
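	/* Release the imm16 bytes of caller-pushed parameters. */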
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) rsp_increment(ctxt, ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /* Save real source value, then compare EAX against destination. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) ctxt->dst.orig_val = ctxt->dst.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) ctxt->src.orig_val = ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) ctxt->src.val = ctxt->dst.orig_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) fastop(ctxt, em_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (ctxt->eflags & X86_EFLAGS_ZF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) /* Success: write back to memory; no update of EAX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ctxt->src.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) ctxt->dst.val = ctxt->src.orig_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /* Failure: write the value we saw to EAX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) ctxt->src.type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) ctxt->src.val = ctxt->dst.orig_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) /* Create write-cycle to dest by writing the same value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) ctxt->dst.val = ctxt->dst.orig_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static int em_lseg(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) int seg = ctxt->src2.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) unsigned short sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) rc = load_segment_descriptor(ctxt, sel, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ctxt->dst.val = ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) return ctxt->ops->guest_has_long_mode(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) desc->g = (flags >> 23) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) desc->d = (flags >> 22) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) desc->l = (flags >> 21) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) desc->avl = (flags >> 20) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) desc->p = (flags >> 15) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) desc->dpl = (flags >> 13) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) desc->s = (flags >> 12) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) desc->type = (flags >> 8) & 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
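
/*
 * The "flags" word above is bits 8-23 of a descriptor's high dword:
 * type in 11:8, S in 12, DPL in 14:13, P in 15, AVL in 20, L in 21,
 * D/B in 22 and G in 23, matching the shifts used when decoding.
 */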
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) struct desc_struct desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) u16 selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (n < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) offset = 0x7f84 + n * 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) offset = 0x7f2c + (n - 3) * 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) struct desc_struct desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) u16 selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) u32 base3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) offset = 0x7e00 + n * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) selector = GET_SMSTATE(u16, smstate, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) base3 = GET_SMSTATE(u32, smstate, offset + 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) u64 cr0, u64 cr3, u64 cr4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) int bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) u64 pcid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) pcid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (cr4 & X86_CR4_PCIDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) pcid = cr3 & 0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) cr3 &= ~0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) bad = ctxt->ops->set_cr(ctxt, 3, cr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (bad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) * First enable PAE, long mode needs it before CR0.PG = 1 is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) * Then enable protected mode. However, PCID cannot be enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) * if EFER.LMA=0, so set it separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if (bad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) bad = ctxt->ops->set_cr(ctxt, 0, cr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (bad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
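	/*
	 * Paging is enabled (and, in the 64-bit case, EFER.LMA is set), so
	 * CR4.PCIDE can now be restored.
	 */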
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (cr4 & X86_CR4_PCIDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) bad = ctxt->ops->set_cr(ctxt, 4, cr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (bad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (pcid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (bad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) const char *smstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) struct desc_struct desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) struct desc_ptr dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) u16 selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) u32 val, cr0, cr3, cr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
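	/*
	 * The offsets below follow the 32-bit SMRAM state-save map, relative
	 * to SMBASE + 0x8000; GET_SMSTATE() resolves them into the 512-byte
	 * buffer that em_rsm() read from smbase + 0xfe00.
	 */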
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
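	/* For DR6/DR7, restore only the volatile bits on top of the fixed ones. */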
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) val = GET_SMSTATE(u32, smstate, 0x7fcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) val = GET_SMSTATE(u32, smstate, 0x7fc8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) selector = GET_SMSTATE(u32, smstate, 0x7fc4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) selector = GET_SMSTATE(u32, smstate, 0x7fc0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) ctxt->ops->set_gdt(ctxt, &dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) ctxt->ops->set_idt(ctxt, &dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) int r = rsm_load_seg_32(ctxt, smstate, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (r != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) const char *smstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) struct desc_struct desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) struct desc_ptr dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) u64 val, cr0, cr3, cr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) u32 base3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) u16 selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) int i, r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) val = GET_SMSTATE(u64, smstate, 0x7f68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) val = GET_SMSTATE(u64, smstate, 0x7f60);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) val = GET_SMSTATE(u64, smstate, 0x7ed0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) selector = GET_SMSTATE(u32, smstate, 0x7e90);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) ctxt->ops->set_idt(ctxt, &dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) selector = GET_SMSTATE(u32, smstate, 0x7e70);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) ctxt->ops->set_gdt(ctxt, &dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (r != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) r = rsm_load_seg_64(ctxt, smstate, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (r != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) static int em_rsm(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) unsigned long cr0, cr4, efer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) char buf[512];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) u64 smbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) smbase = ctxt->ops->get_smbase(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
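	/*
	 * Read the whole state-save area (SMRAM offsets 0x7e00-0x7fff) in
	 * one go; rsm_load_state_32/64() pull individual fields out of buf.
	 */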
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) ctxt->ops->set_nmi_mask(ctxt, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * Get back to real mode, to prepare a safe state in which to load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * supports long mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) if (emulator_has_longmode(ctxt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) struct desc_struct cs_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /* Zero CR4.PCIDE before CR0.PG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) cr4 = ctxt->ops->get_cr(ctxt, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) if (cr4 & X86_CR4_PCIDE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) /* A 32-bit code segment is required to clear EFER.LMA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) memset(&cs_desc, 0, sizeof(cs_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) cs_desc.type = 0xb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) cs_desc.s = cs_desc.g = cs_desc.p = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) /* For the 64-bit case, this will clear EFER.LMA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) cr0 = ctxt->ops->get_cr(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (cr0 & X86_CR0_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (emulator_has_longmode(ctxt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) /* Clear CR4.PAE before clearing EFER.LME. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) cr4 = ctxt->ops->get_cr(ctxt, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) if (cr4 & X86_CR4_PAE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) /* And finally go back to 32-bit mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) efer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * Give pre_leave_smm() a chance to make ISA-specific changes to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * vCPU state (e.g. enter guest mode) before loading state from the SMM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) * state-save area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) if (ctxt->ops->pre_leave_smm(ctxt, buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (emulator_has_longmode(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) ret = rsm_load_state_64(ctxt, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) ret = rsm_load_state_32(ctxt, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) if (ret != X86EMUL_CONTINUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) /* FIXME: should triple fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) ctxt->ops->post_leave_smm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) struct desc_struct *cs, struct desc_struct *ss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) cs->l = 0; /* will be adjusted later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) set_desc_base(cs, 0); /* flat segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	cs->g = 1;		/* 4KB granularity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) set_desc_limit(cs, 0xfffff); /* 4GB limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) cs->type = 0x0b; /* Read, Execute, Accessed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) cs->s = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) cs->dpl = 0; /* will be adjusted later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) cs->p = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) cs->d = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) cs->avl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) set_desc_base(ss, 0); /* flat segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) set_desc_limit(ss, 0xfffff); /* 4GB limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	ss->g = 1;		/* 4KB granularity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) ss->s = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) ss->type = 0x03; /* Read/Write, Accessed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	ss->d = 1;		/* 32-bit stack segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) ss->dpl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) ss->p = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) ss->l = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) ss->avl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) u32 eax, ebx, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) eax = ecx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) return is_guest_vendor_intel(ebx, ecx, edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) u32 eax, ebx, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	 * SYSCALL is always enabled in long mode, so the check only becomes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	 * vendor-specific (via CPUID) when other modes are active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) eax = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) ecx = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode, so a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	 * 32-bit compat application on a 64-bit guest will #UD! While Intel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	 * behaviour could be fixed up to emulate the AMD response, AMD CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	 * cannot be made to behave like Intel ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) if (is_guest_vendor_intel(ebx, ecx, edx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (is_guest_vendor_amd(ebx, ecx, edx) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) is_guest_vendor_hygon(ebx, ecx, edx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * default: (not Intel, not AMD, not Hygon), apply Intel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) * stricter rules...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) static int em_syscall(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) struct desc_struct cs, ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) u64 msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) u16 cs_sel, ss_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) u64 efer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) /* syscall is not available in real mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (ctxt->mode == X86EMUL_MODE_REAL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) ctxt->mode == X86EMUL_MODE_VM86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	if (!em_syscall_is_enabled(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) ops->get_msr(ctxt, MSR_EFER, &efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (!(efer & EFER_SCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) setup_syscalls_segments(ctxt, &cs, &ss);
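	/*
	 * MSR_STAR[47:32] holds the SYSCALL CS selector; SS is
	 * architecturally defined as that selector + 8.
	 */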
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) ops->get_msr(ctxt, MSR_STAR, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) msr_data >>= 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) cs_sel = (u16)(msr_data & 0xfffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) ss_sel = (u16)(msr_data + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (efer & EFER_LMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) cs.d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) cs.l = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) if (efer & EFER_LMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) ops->get_msr(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) ctxt->mode == X86EMUL_MODE_PROT64 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) MSR_LSTAR : MSR_CSTAR, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) ctxt->_eip = msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) ctxt->eflags &= ~msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) ctxt->eflags |= X86_EFLAGS_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) /* legacy mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) ops->get_msr(ctxt, MSR_STAR, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) ctxt->_eip = (u32)msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) static int em_sysenter(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) struct desc_struct cs, ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) u64 msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) u16 cs_sel, ss_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) u64 efer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) ops->get_msr(ctxt, MSR_EFER, &efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) /* inject #GP if in real mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (ctxt->mode == X86EMUL_MODE_REAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) * Not recognized on AMD in compat mode (but is recognized in legacy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) * mode).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	    !vendor_intel(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	/* SYSENTER/SYSEXIT have not been tested in 64-bit mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) if ((msr_data & 0xfffc) == 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) setup_syscalls_segments(ctxt, &cs, &ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
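	/* SYSENTER loads CS from SYSENTER_CS, and SS is always CS + 8. */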
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) ss_sel = cs_sel + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (efer & EFER_LMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) cs.d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) cs.l = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) (u32)msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (efer & EFER_LMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) ctxt->mode = X86EMUL_MODE_PROT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) static int em_sysexit(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) struct desc_struct cs, ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) u64 msr_data, rcx, rdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) int usermode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) u16 cs_sel = 0, ss_sel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) /* inject #GP if in real mode or Virtual 8086 mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) if (ctxt->mode == X86EMUL_MODE_REAL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) ctxt->mode == X86EMUL_MODE_VM86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) setup_syscalls_segments(ctxt, &cs, &ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)
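	/* REX.W (bit 3 of the REX prefix) selects a 64-bit SYSEXIT. */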
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) if ((ctxt->rex_prefix & 0x8) != 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) usermode = X86EMUL_MODE_PROT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) usermode = X86EMUL_MODE_PROT32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) rcx = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) rdx = reg_read(ctxt, VCPU_REGS_RDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) cs.dpl = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) ss.dpl = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
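	/*
	 * SYSEXIT derives the user-mode selectors from SYSENTER_CS:
	 * CS/SS at +16/+24 for a 32-bit return, +32/+40 for 64-bit.
	 */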
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) switch (usermode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) case X86EMUL_MODE_PROT32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) cs_sel = (u16)(msr_data + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) if ((msr_data & 0xfffc) == 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) ss_sel = (u16)(msr_data + 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) rcx = (u32)rcx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) rdx = (u32)rdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) case X86EMUL_MODE_PROT64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) cs_sel = (u16)(msr_data + 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) if (msr_data == 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) ss_sel = cs_sel + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) cs.d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) cs.l = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (emul_is_noncanonical_address(rcx, ctxt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) emul_is_noncanonical_address(rdx, ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) cs_sel |= SEGMENT_RPL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) ss_sel |= SEGMENT_RPL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) ctxt->_eip = rdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
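/*
 * Returns true when the TSS I/O permission bitmap must be consulted:
 * never in real mode, always in VM86 mode, and in protected mode
 * whenever CPL exceeds IOPL.
 */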
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) int iopl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (ctxt->mode == X86EMUL_MODE_REAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if (ctxt->mode == X86EMUL_MODE_VM86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) return ctxt->ops->cpl(ctxt) > iopl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) #define VMWARE_PORT_VMPORT (0x5658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) #define VMWARE_PORT_VMRPC (0x5659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) u16 port, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) struct desc_struct tr_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) u32 base3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) unsigned mask = (1 << len) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) unsigned long base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	 * VMware allows access to these ports even if denied by the TSS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	 * I/O permission bitmap. Mimic that behavior.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) if (enable_vmware_backdoor &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) if (!tr_seg.p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) return false;
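	/*
	 * The 16-bit I/O map base lives at offset 102 in the TSS, so the
	 * limit must cover at least bytes 0-103.
	 */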
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (desc_limit_scaled(&tr_seg) < 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) base = get_desc_base(&tr_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) base |= ((u64)base3) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) if (r != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) return false;
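	/*
	 * Read two bitmap bytes so that an access straddling a byte
	 * boundary is still fully covered by the mask test below.
	 */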
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (r != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) if ((perm >> bit_idx) & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) u16 port, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) if (ctxt->perm_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (emulator_bad_iopl(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) if (!emulator_io_port_access_allowed(ctxt, port, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ctxt->perm_ok = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	 * Intel CPUs mask the counter and pointers in a rather strange
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	 * manner when ECX is zero, due to REP-string optimizations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) *reg_write(ctxt, VCPU_REGS_RCX) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) switch (ctxt->b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) case 0xa4: /* movsb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) case 0xa5: /* movsd/w */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) case 0xaa: /* stosb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) case 0xab: /* stosd/w */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) struct tss_segment_16 *tss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) tss->ip = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) tss->flag = ctxt->eflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) tss->si = reg_read(ctxt, VCPU_REGS_RSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) tss->di = reg_read(ctxt, VCPU_REGS_RDI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) struct tss_segment_16 *tss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) u8 cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) ctxt->_eip = tss->ip;
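	/* Bit 1 of EFLAGS is reserved and always reads as 1. */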
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) ctxt->eflags = tss->flag | 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	 * The SDM says that segment selectors are loaded before segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	 * descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) cpl = tss->cs & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	 * Now load the segment descriptors. If a fault happens at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	 * stage, it is handled in the context of the new task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) static int task_switch_16(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) u16 tss_selector, u16 old_tss_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) ulong old_tss_base, struct desc_struct *new_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) struct tss_segment_16 tss_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) u32 new_tss_base = get_desc_base(new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) save_state_to_tss16(ctxt, &tss_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144)
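	/*
	 * An old_tss_sel of 0xffff means "no previous task": only write the
	 * backlink into the new TSS when there is a task to link back to.
	 */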
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) if (old_tss_sel != 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) tss_seg.prev_task_link = old_tss_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) ret = linear_write_system(ctxt, new_tss_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) &tss_seg.prev_task_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) sizeof(tss_seg.prev_task_link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) return load_state_from_tss16(ctxt, &tss_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) struct tss_segment_32 *tss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	/* CR3 and the LDT selector are intentionally not saved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) tss->eip = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) tss->eflags = ctxt->eflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) struct tss_segment_32 *tss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) u8 cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
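	/* Load CR3 first; a rejected value aborts the task switch with #GP. */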
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) ctxt->_eip = tss->eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) ctxt->eflags = tss->eflags | 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) /* General purpose registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) /*
	 * The SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) * If we're switching between Protected Mode and VM86, we need to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) * sure to update the mode before loading the segment descriptors so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) * that the selectors are interpreted correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) if (ctxt->eflags & X86_EFLAGS_VM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) ctxt->mode = X86EMUL_MODE_VM86;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) cpl = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) ctxt->mode = X86EMUL_MODE_PROT32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) cpl = tss->cs & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) /*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) cpl, X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) X86_TRANSFER_TASK_SWITCH, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) static int task_switch_32(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) u16 tss_selector, u16 old_tss_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) ulong old_tss_base, struct desc_struct *new_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) struct tss_segment_32 tss_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) u32 new_tss_base = get_desc_base(new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) u32 eip_offset = offsetof(struct tss_segment_32, eip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) save_state_to_tss32(ctxt, &tss_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) /* Only GP registers and segment selectors are saved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) ldt_sel_offset - eip_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) if (old_tss_sel != 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) tss_seg.prev_task_link = old_tss_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) ret = linear_write_system(ctxt, new_tss_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) &tss_seg.prev_task_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) sizeof(tss_seg.prev_task_link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) return load_state_from_tss32(ctxt, &tss_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300)
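/*
 * Common task-switch worker: validate the new TSS descriptor, save the
 * outgoing state into the old TSS and load the incoming state from the
 * new one (16- or 32-bit format, per the descriptor type), then update
 * the busy flags, EFLAGS.NT, CR0.TS, TR and DR7, and push the error
 * code for fault-induced switches.
 */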
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) u16 tss_selector, int idt_index, int reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) bool has_error_code, u32 error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) struct desc_struct curr_tss_desc, next_tss_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) ulong old_tss_base =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) u32 desc_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) ulong desc_addr, dr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) /* FIXME: old_tss_base == ~0 ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) /* FIXME: check that next_tss_desc is tss */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) * Check privileges. The three cases are task switch caused by...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) * 1. jmp/call/int to task gate: Check against DPL of the task gate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) * 2. Exception/IRQ/iret: No check is performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) * 3. jmp/call to TSS/task-gate: No check is performed since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) * hardware checks it before exiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (reason == TASK_SWITCH_GATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) if (idt_index != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) /* Software interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) struct desc_struct task_gate_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) int dpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) ret = read_interrupt_descriptor(ctxt, idt_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) &task_gate_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) dpl = task_gate_desc.dpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) return emulate_gp(ctxt, (idt_index << 3) | 0x2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) desc_limit = desc_limit_scaled(&next_tss_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) if (!next_tss_desc.p ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) desc_limit < 0x2b)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) return emulate_ts(ctxt, tss_selector & 0xfffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) if (reason == TASK_SWITCH_IRET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
	/*
	 * Set the back link to the previous task only if the NT bit is
	 * set in EFLAGS.  Note that old_tss_sel is not used after this
	 * point.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) old_tss_sel = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) if (next_tss_desc.type & 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) old_tss_base, &next_tss_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) old_tss_base, &next_tss_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) if (ret != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) if (reason != TASK_SWITCH_IRET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) next_tss_desc.type |= (1 << 1); /* set busy flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) if (has_error_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) ctxt->lock_prefix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) ctxt->src.val = (unsigned long) error_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) ret = em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) ops->get_dr(ctxt, 7, &dr7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) u16 tss_selector, int idt_index, int reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) bool has_error_code, u32 error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) invalidate_registers(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) ctxt->_eip = ctxt->eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) has_error_code, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) if (rc == X86EMUL_CONTINUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) ctxt->eip = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) writeback_registers(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
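/*
 * Advance a string operand's pointer register (rSI/rDI) by the element
 * count, moving backwards when EFLAGS.DF is set, and refresh the
 * operand's effective address to match.
 */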
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) register_address_increment(ctxt, reg, df * op->bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) op->addr.mem.ea = register_address(ctxt, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
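/*
 * DAS - decimal adjust AL after BCD subtraction.  Roughly: if the low
 * nibble of AL is above 9 (or AF is set), subtract 6; if AL was above
 * 0x99 (or CF was set), also subtract 0x60.  E.g. 0x23 - 0x07 leaves
 * AL = 0x1c, which DAS adjusts to the packed-BCD result 0x16.
 */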
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) static int em_das(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) u8 al, old_al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) bool af, cf, old_cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) cf = ctxt->eflags & X86_EFLAGS_CF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) al = ctxt->dst.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) old_al = al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) old_cf = cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) cf = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) af = ctxt->eflags & X86_EFLAGS_AF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) if ((al & 0x0f) > 9 || af) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) al -= 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) cf = old_cf | (al >= 250);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) af = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) af = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) if (old_al > 0x99 || old_cf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) al -= 0x60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) cf = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) ctxt->dst.val = al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) /* Set PF, ZF, SF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) ctxt->src.type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) ctxt->src.val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) ctxt->src.bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) fastop(ctxt, em_or);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) if (cf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) ctxt->eflags |= X86_EFLAGS_CF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (af)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) ctxt->eflags |= X86_EFLAGS_AF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470)
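/*
 * AAM - ASCII adjust AX after multiply: AH = AL / imm, AL = AL % imm,
 * where imm defaults to 10.  E.g. AL = 123 becomes AH = 12, AL = 3.
 * An immediate of zero raises #DE, checked below.
 */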
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) static int em_aam(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) u8 al, ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) if (ctxt->src.val == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) return emulate_de(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) al = ctxt->dst.val & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) ah = al / ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) al %= ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) /* Set PF, ZF, SF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) ctxt->src.type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) ctxt->src.val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) ctxt->src.bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) fastop(ctxt, em_or);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
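/*
 * AAD - ASCII adjust AX before division: AL = (AL + AH * imm) & 0xff
 * and AH = 0, with imm defaulting to 10.  E.g. AH = 12, AL = 3 becomes
 * AL = 123 (0x7b), AH = 0.
 */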
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) static int em_aad(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) u8 al = ctxt->dst.val & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) u8 ah = (ctxt->dst.val >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) al = (al + (ah * ctxt->src.val)) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) /* Set PF, ZF, SF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) ctxt->src.type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) ctxt->src.val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) ctxt->src.bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) fastop(ctxt, em_or);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
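/*
 * Near relative CALL: the branch target is validated first, so a
 * failing jmp_rel() returns before the return address (the updated
 * _eip) is pushed, leaving the stack untouched.
 */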
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) static int em_call(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) long rel = ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) ctxt->src.val = (unsigned long)ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) rc = jmp_rel(ctxt, rel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) return em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) static int em_call_far(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) u16 sel, old_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) ulong old_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) struct desc_struct old_desc, new_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) int cpl = ctxt->ops->cpl(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) enum x86emul_mode prev_mode = ctxt->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) old_eip = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) X86_TRANSFER_CALL_JMP, &new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) ctxt->src.val = old_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) rc = em_push(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ctxt->src.val = old_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore CS.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) if (rc != X86EMUL_CONTINUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) pr_warn_once("faulting far call emulation tainted memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) ctxt->mode = prev_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566)
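/*
 * RET imm16: pop the return address first, then release a further
 * imm16 bytes of callee-cleaned stack via rsp_increment().
 */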
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) unsigned long eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) rc = assign_eip_near(ctxt, eip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) rsp_increment(ctxt, ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
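/*
 * XCHG: the register side is written back immediately below, while the
 * memory side goes through the normal writeback path with an implicit
 * LOCK prefix, as the architecture requires for XCHG with memory.
 */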
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) static int em_xchg(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) /* Write back the register source. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) ctxt->src.val = ctxt->dst.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) write_register_operand(&ctxt->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) /* Write back the memory destination with implicit LOCK prefix. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) ctxt->dst.val = ctxt->src.orig_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) ctxt->lock_prefix = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) ctxt->dst.val = ctxt->src2.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) return fastop(ctxt, em_imul);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
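/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit throughout rDX.
 * The expression below shifts the sign bit down to bit 0; subtracting
 * one then gives 0 or ~0UL, and the bitwise NOT yields all ones for a
 * negative source and zero otherwise.
 */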
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) static int em_cwd(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) ctxt->dst.type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) ctxt->dst.bytes = ctxt->src.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
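/*
 * RDPID returns IA32_TSC_AUX in the destination operand; a failed read
 * of MSR_TSC_AUX is reported to the guest as #UD.
 */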
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) static int em_rdpid(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) u64 tsc_aux = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) ctxt->dst.val = tsc_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) u64 tsc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) u64 pmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) static int em_mov(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) static int em_movbe(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) if (!ctxt->ops->guest_has_movbe(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) switch (ctxt->op_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) * From MOVBE definition: "...When the operand size is 16 bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) * the upper word of the destination register remains unchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) * ..."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) *
	 * Casting either ->valptr or ->val to u16 would break strict
	 * aliasing rules, so we have to do the operation almost by hand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) tmp = (u16)ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) ctxt->dst.val &= ~0xffffUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) ctxt->dst.val |= (unsigned long)swab16(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) ctxt->dst.val = swab32((u32)ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) ctxt->dst.val = swab64(ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) static int em_cr_write(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) static int em_dr_write(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) val = ctxt->src.val & ~0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) val = ctxt->src.val & ~0U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) /* #UD condition is already handled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)
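/*
 * WRMSR: ECX selects the MSR, EDX:EAX supplies the 64-bit value.  A
 * positive return from ->set_msr() becomes #GP(0), a negative one is
 * treated as unhandleable emulation, and X86EMUL_IO_NEEDED is passed
 * straight back to the caller.
 */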
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) u64 msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) if (r == X86EMUL_IO_NEEDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (r > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) u64 msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) if (r == X86EMUL_IO_NEEDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) if (segment > VCPU_SREG_GS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) ctxt->ops->cpl(ctxt) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) ctxt->dst.val = get_segment_selector(ctxt, segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) ctxt->dst.bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) if (ctxt->modrm_reg > VCPU_SREG_GS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) return em_store_sreg(ctxt, ctxt->modrm_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) u16 sel = ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) if (ctxt->modrm_reg == VCPU_SREG_SS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) static int em_sldt(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) return em_store_sreg(ctxt, VCPU_SREG_LDTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) static int em_lldt(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) u16 sel = ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) static int em_str(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) return em_store_sreg(ctxt, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) static int em_ltr(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) u16 sel = ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) static int em_invlpg(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) ulong linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (rc == X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) ctxt->ops->invlpg(ctxt, linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) static int em_clts(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) ulong cr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) cr0 = ctxt->ops->get_cr(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) cr0 &= ~X86_CR0_TS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) ctxt->ops->set_cr(ctxt, 0, cr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) static int em_hypercall(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) int rc = ctxt->ops->fix_hypercall(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) /* Let the processor re-execute the fixed hypercall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) ctxt->_eip = ctxt->eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
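/*
 * Common SGDT/SIDT helper: store the 2-byte table limit followed by
 * the base address.  With a 16-bit operand size only 24 bits of the
 * base are stored, hence the masking and the bump of op_bytes to 4
 * ahead of the 2 + op_bytes byte write below.
 */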
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) void (*get)(struct x86_emulate_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) struct desc_ptr *ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) struct desc_ptr desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) ctxt->ops->cpl(ctxt) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) ctxt->op_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) get(ctxt, &desc_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) if (ctxt->op_bytes == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) ctxt->op_bytes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) desc_ptr.address &= 0x00ffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) return segmented_write_std(ctxt, ctxt->dst.addr.mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) &desc_ptr, 2 + ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) static int em_sgdt(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) static int em_sidt(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) struct desc_ptr desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) ctxt->op_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) rc = read_descriptor(ctxt, ctxt->src.addr.mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) &desc_ptr.size, &desc_ptr.address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) ctxt->op_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) if (ctxt->mode == X86EMUL_MODE_PROT64 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) emul_is_noncanonical_address(desc_ptr.address, ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) if (lgdt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) ctxt->ops->set_gdt(ctxt, &desc_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) ctxt->ops->set_idt(ctxt, &desc_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) static int em_lgdt(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) return em_lgdt_lidt(ctxt, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) static int em_lidt(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) return em_lgdt_lidt(ctxt, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913)
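/*
 * SMSW: store the machine status word, i.e. the low bits of CR0.  Like
 * SLDT/STR above it is subject to the UMIP check, and a memory
 * destination is truncated to two bytes.
 */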
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) static int em_smsw(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) ctxt->ops->cpl(ctxt) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) if (ctxt->dst.type == OP_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) ctxt->dst.bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) static int em_lmsw(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) | (ctxt->src.val & 0x0f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)
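/*
 * LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0): decrement rCX and branch while
 * it is non-zero.  XOR-ing the opcode with 0x5 maps 0xe1/0xe0 onto the
 * ZF-set/ZF-clear condition codes, so test_cc() provides the extra
 * LOOPE/LOOPNE flag test while plain LOOP (0xe2) branches on rCX
 * alone.
 */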
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) static int em_loop(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) register_address_increment(ctxt, VCPU_REGS_RCX, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) rc = jmp_rel(ctxt, ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
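/*
 * JCXZ/JECXZ/JRCXZ: branch if rCX, masked to the current address size,
 * is zero; rCX itself is left unmodified.
 */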
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) static int em_jcxz(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) rc = jmp_rel(ctxt, ctxt->src.val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) static int em_in(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) &ctxt->dst.val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) return X86EMUL_IO_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) static int em_out(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) &ctxt->src.val, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) ctxt->dst.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) static int em_cli(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) if (emulator_bad_iopl(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) ctxt->eflags &= ~X86_EFLAGS_IF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)
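/*
 * STI: besides setting IF, arm the one-instruction STI shadow so that
 * interrupts are not recognized until the following instruction has
 * completed.
 */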
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) static int em_sti(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) if (emulator_bad_iopl(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) ctxt->eflags |= X86_EFLAGS_IF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)
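/*
 * CPUID, honoring the CPUID-faulting control: if CPUID faulting is
 * enabled in MSR_MISC_FEATURES_ENABLES and CPL > 0, raise #GP(0)
 * instead of executing the leaf.
 */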
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) static int em_cpuid(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) u32 eax, ebx, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) u64 msr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) ctxt->ops->cpl(ctxt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) eax = reg_read(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) ecx = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) *reg_write(ctxt, VCPU_REGS_RAX) = eax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) *reg_write(ctxt, VCPU_REGS_RDX) = edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) }
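
/*
 * Worked example (illustrative sketch, not part of the original source;
 * wrmsr()/cpuid() stand in for the usual inline-asm helpers): with CPUID
 * faulting armed, a CPUID executed at CPL > 0 takes the #GP(0) path above
 * instead of returning leaf data:
 *
 *	wrmsr(MSR_MISC_FEATURES_ENABLES,
 *	      MSR_MISC_FEATURES_ENABLES_CPUID_FAULT);	// at CPL 0
 *	...
 *	cpuid(0, &eax, &ebx, &ecx, &edx);		// at CPL 3: #GP(0)
 */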
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) static int em_sahf(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) X86_EFLAGS_SF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) ctxt->eflags &= ~0xffUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) ctxt->eflags |= flags | X86_EFLAGS_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) }
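
/*
 * Worked example (illustrative): SAHF copies AH into the five status
 * flags and leaves the rest of EFLAGS alone. With AH = 0xd5 (CF, PF, AF,
 * ZF and SF all set), the masking above yields EFLAGS[7:0] = 0xd7 once
 * X86_EFLAGS_FIXED (bit 1) is OR-ed back in; IF, DF, OF and the other
 * upper bits are untouched.
 */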
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) static int em_lahf(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) static int em_bswap(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) switch (ctxt->op_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) asm("bswap %0" : "+r"(ctxt->dst.val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) }
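
/*
 * Worked example (illustrative): BSWAP reverses the destination's byte
 * order, e.g. 0x12345678 becomes 0x78563412 in the 32-bit form. The
 * default case casts through a u32 so only the low dword of dst.val is
 * swapped in place.
 */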
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) static int em_clflush(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) /* emulate clflush as a nop, regardless of guest cpuid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) /* emulate clflushopt as a nop, regardless of guest cpuid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) static int em_movsxd(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) ctxt->dst.val = (s32) ctxt->src.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) static int check_fxsr(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) if (!ctxt->ops->guest_has_fxsr(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) return emulate_nm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) * Refuse to emulate a case that should never be hit, rather than working
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) * around the lack of fxsave64/fxrstor64 on old compilers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) if (ctxt->mode >= X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) return X86EMUL_UNHANDLEABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) * and restore MXCSR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) static size_t __fxstate_size(int nregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) bool cr4_osfxsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) return __fxstate_size(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) return __fxstate_size(cr4_osfxsr ? 8 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) }
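
/*
 * Worked sizes (illustrative, derived from the architectural FXSAVE
 * layout: xmm_space starts at byte 160 and each XMM register occupies
 * 16 bytes):
 *
 *	__fxstate_size(16) = 160 + 256 = 416	64-bit mode, XMM0-15
 *	__fxstate_size(8)  = 160 + 128 = 288	legacy mode, CR4.OSFXSR=1
 *	__fxstate_size(0)  = 160		legacy mode, CR4.OSFXSR=0
 *
 * The rest of the 512-byte FXSAVE area is reserved and never copied.
 */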
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) * 1) 16 bit mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) * 2) 32 bit mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) * - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) * preserve the whole 32 bit values, though, so (1) and (2) are the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) * with respect to save and restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) * 3) 64-bit mode with REX.W prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) * - like (2), but XMM 8-15 are being saved and restored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) * 4) 64-bit mode without REX.W prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) * - like (3), but FIP and FDP are 64 bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) * desired result. (4) is not emulated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) * and FPU DS) should match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) static int em_fxsave(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) struct fxregs_state fx_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) rc = check_fxsr(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) fxstate_size(ctxt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) }
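
/*
 * Usage sketch (illustrative, assuming a 16/32-bit guest): the path above
 * runs when KVM has to emulate something like
 *
 *	struct fxregs_state buf __attribute__((aligned(16)));
 *	asm volatile("fxsave %0" : "+m" (buf));
 *
 * check_fxsr() has already rejected 64-bit mode, so only the 16/32-bit
 * formats from the comment above can reach segmented_write_std().
 */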
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) * FXRSTOR might restore XMM registers not provided by the guest. Fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) * in the host registers (via FXSAVE) instead, so they won't be modified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) * (preemption has to stay disabled until FXRSTOR).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) * Use noinline to keep the 512-byte on-stack fx_tmp out of the callers' frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) static noinline int fxregs_fixup(struct fxregs_state *fx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) const size_t used_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) struct fxregs_state fx_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) __fxstate_size(16) - used_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
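
/*
 * Illustrative walk-through of the fixup above: for a 32-bit guest with
 * CR4.OSFXSR=1, used_size is 288, so bytes [288, 416) of fx_state, i.e.
 * the XMM8-15 slots, are refilled from a fresh host FXSAVE. The later
 * FXRSTOR then "restores" those registers to their current host values,
 * leaving them unmodified.
 */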
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) struct fxregs_state fx_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) rc = check_fxsr(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) size = fxstate_size(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) if (size < __fxstate_size(16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) rc = fxregs_fixup(&fx_state, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) if (fx_state.mxcsr >> 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) rc = emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) if (rc == X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) u32 eax, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) eax = reg_read(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) edx = reg_read(ctxt, VCPU_REGS_RDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) ecx = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) }
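
/*
 * Worked example (illustrative): XSETBV writes EDX:EAX into XCR[ECX],
 * e.g. ECX=0, EDX=0, EAX=0x7 enables x87, SSE and AVX state in XCR0.
 * Anything ->set_xcr() rejects (reserved bits, XCR0.x87 clear, ECX != 0,
 * CPL > 0, ...) comes back to the guest as #GP(0).
 */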
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) static bool valid_cr(int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) switch (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) case 2 ... 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) static int check_cr_access(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) if (!valid_cr(ctxt->modrm_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) unsigned long dr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) ctxt->ops->get_dr(ctxt, 7, &dr7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) /* Check if DR7.GD (general detect enable, bit 13) is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) return dr7 & (1 << 13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) static int check_dr_read(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) int dr = ctxt->modrm_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) u64 cr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) if (dr > 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) cr4 = ctxt->ops->get_cr(ctxt, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) if (check_dr7_gd(ctxt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) ulong dr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) ctxt->ops->get_dr(ctxt, 6, &dr6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) dr6 &= ~DR_TRAP_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) dr6 |= DR6_BD | DR6_RTM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) ctxt->ops->set_dr(ctxt, 6, dr6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) return emulate_db(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
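
/*
 * Illustrative note: DR7.GD is how a debugger protects the debug
 * registers from the code being debugged. With GD=1, any MOV to or from
 * a DR takes a #DB with DR6.BD set before the access happens, which is
 * exactly what the check above reproduces.
 */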
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) static int check_dr_write(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) u64 new_val = ctxt->src.val64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) int dr = ctxt->modrm_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) return check_dr_read(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) static int check_svme(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) u64 efer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) if (!(efer & EFER_SVME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) /* Valid physical address? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) if (rax & 0xffff000000000000ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) return check_svme(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) }
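
/*
 * Illustrative note: VMRUN, VMLOAD and VMSAVE take the physical address
 * of a VMCB in RAX. The mask above only rejects addresses with bits
 * 63:48 set, i.e. it assumes MAXPHYADDR <= 48 rather than checking the
 * guest's actual physical-address width.
 */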
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) return emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) * VMware allows access to these pseudo-PMCs even when read via RDPMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) * in ring 3 with CR4.PCE=0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) ctxt->ops->check_pmc(ctxt, rcx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) }
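
/*
 * Illustrative example: with CR4.PCE=0, RDPMC at CPL > 0 normally takes
 * #GP(0); the carve-out above lets a ring-3 guest read the VMware
 * pseudo-PMCs (host TSC and the elapsed-time counters) anyway when the
 * backdoor is enabled.
 */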
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) static int check_perm_in(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) static int check_perm_out(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) ctxt->src.bytes = min(ctxt->src.bytes, 4u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) return emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) #define D(_y) { .flags = (_y) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) .intercept = x86_intercept_##_i, .check_perm = (_p) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) #define N D(NotImpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) #define II(_f, _e, _i) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) #define IIP(_f, _e, _i, _p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) .intercept = x86_intercept_##_i, .check_perm = (_p) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) #define D2bv(_f) D((_f) | ByteOp), D(_f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) #define I2bvIP(_f, _e, _i, _p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
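
/*
 * Expansion example (illustrative): F6ALU(Lock, em_add) produces the six
 * table entries for opcodes 0x00-0x05, i.e. the r/m,reg / reg,r/m /
 * AL-or-eAX,imm forms in byte and word/dword flavours:
 *
 *	F(Lock | DstMem | SrcReg | ModRM | ByteOp, em_add)	// 0x00
 *	F(Lock | DstMem | SrcReg | ModRM, em_add)		// 0x01
 *	F(DstReg | SrcMem | ModRM | ByteOp, em_add)		// 0x02, ~Lock
 *	F(DstReg | SrcMem | ModRM, em_add)			// 0x03, ~Lock
 *	F(DstAcc | SrcImm | ByteOp, em_add)			// 0x04
 *	F(DstAcc | SrcImm, em_add)				// 0x05
 */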
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) static const struct opcode group7_rm0[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) I(SrcNone | Priv | EmulateOnUD, em_hypercall),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) static const struct opcode group7_rm1[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) DI(SrcNone | Priv, monitor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) DI(SrcNone | Priv, mwait),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) static const struct opcode group7_rm2[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) II(ImplicitOps | Priv, em_xsetbv, xsetbv),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) static const struct opcode group7_rm3[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) DIP(SrcNone | Prot | Priv, stgi, check_svme),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) DIP(SrcNone | Prot | Priv, clgi, check_svme),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) DIP(SrcNone | Prot | Priv, skinit, check_svme),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) DIP(SrcNone | Prot | Priv, invlpga, check_svme),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) static const struct opcode group7_rm7[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) DIP(SrcNone, rdtscp, check_rdtsc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) static const struct opcode group1[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) F(Lock, em_add),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) F(Lock | PageTable, em_or),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) F(Lock, em_adc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) F(Lock, em_sbb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) F(Lock | PageTable, em_and),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) F(Lock, em_sub),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) F(Lock, em_xor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) F(NoWrite, em_cmp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) static const struct opcode group1A[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) static const struct opcode group2[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) F(DstMem | ModRM, em_rol),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) F(DstMem | ModRM, em_ror),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) F(DstMem | ModRM, em_rcl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) F(DstMem | ModRM, em_rcr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) F(DstMem | ModRM, em_shl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) F(DstMem | ModRM, em_shr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) F(DstMem | ModRM, em_shl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) F(DstMem | ModRM, em_sar),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) static const struct opcode group3[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) F(DstMem | SrcImm | NoWrite, em_test),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) F(DstMem | SrcImm | NoWrite, em_test),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) F(DstMem | SrcNone | Lock, em_not),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) F(DstMem | SrcNone | Lock, em_neg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) F(DstXacc | Src2Mem, em_mul_ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) F(DstXacc | Src2Mem, em_imul_ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) F(DstXacc | Src2Mem, em_div_ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) F(DstXacc | Src2Mem, em_idiv_ex),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) static const struct opcode group4[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) F(ByteOp | DstMem | SrcNone | Lock, em_inc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) F(ByteOp | DstMem | SrcNone | Lock, em_dec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) static const struct opcode group5[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) F(DstMem | SrcNone | Lock, em_inc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) F(DstMem | SrcNone | Lock, em_dec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) I(SrcMem | NearBranch, em_call_near_abs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) I(SrcMemFAddr | ImplicitOps, em_call_far),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) I(SrcMem | NearBranch, em_jmp_abs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) I(SrcMemFAddr | ImplicitOps, em_jmp_far),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) static const struct opcode group6[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) II(Prot | DstMem, em_sldt, sldt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) II(Prot | DstMem, em_str, str),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) II(Prot | Priv | SrcMem16, em_lldt, lldt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) II(Prot | Priv | SrcMem16, em_ltr, ltr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) static const struct group_dual group7 = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) II(Mov | DstMem, em_sgdt, sgdt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) II(Mov | DstMem, em_sidt, sidt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) II(SrcMem | Priv, em_lgdt, lgdt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) II(SrcMem | Priv, em_lidt, lidt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) EXT(0, group7_rm0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) EXT(0, group7_rm1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) EXT(0, group7_rm2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) EXT(0, group7_rm3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) EXT(0, group7_rm7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) } };
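
/*
 * Decode example (illustrative): for 0F 01, the ModRM reg field selects
 * the row. With mod == 3 the second half of the group_dual is used and
 * the rm field indexes the EXT() tables, so 0F 01 D8 (mod=3, reg=3,
 * rm=0) resolves to group7_rm3[0], i.e. VMRUN.
 */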
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) static const struct opcode group8[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) F(DstMem | SrcImmByte | NoWrite, em_bt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) F(DstMem | SrcImmByte | Lock, em_btr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) * The "memory" destination is actually always a register, since we come
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) * from the register case of group9.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) static const struct gprefix pfx_0f_c7_7 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) static const struct group_dual group9 = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) GP(0, &pfx_0f_c7_7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) static const struct opcode group11[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) I(DstMem | SrcImm | Mov | PageTable, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) X7(D(Undefined)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) static const struct gprefix pfx_0f_ae_7 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) };
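
/*
 * Illustrative: for 0F AE /7 the mandatory prefix picks the gprefix
 * slot: no prefix decodes as CLFLUSH, 0x66 as CLFLUSHOPT, and the F2/F3
 * slots are undefined here.
 */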
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) static const struct group_dual group15 = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) I(ModRM | Aligned16, em_fxsave),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) I(ModRM | Aligned16, em_fxrstor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) N, N, N, N, N, GP(0, &pfx_0f_ae_7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) static const struct gprefix pfx_0f_6f_0f_7f = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) static const struct instr_dual instr_dual_0f_2b = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) I(0, em_mov), N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) static const struct gprefix pfx_0f_2b = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) static const struct gprefix pfx_0f_10_0f_11 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) static const struct gprefix pfx_0f_28_0f_29 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) I(Aligned, em_mov), I(Aligned, em_mov), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) static const struct gprefix pfx_0f_e7 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) N, I(Sse, em_mov), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) static const struct escape escape_d9 = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) /* 0xC0 - 0xC7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) /* 0xC8 - 0xCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) /* 0xD0 - 0xD7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) /* 0xD8 - 0xDF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) /* 0xE0 - 0xE7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) /* 0xE8 - 0xEF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) /* 0xF0 - 0xF7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) /* 0xF8 - 0xFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) static const struct escape escape_db = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) /* 0xC0 - 0xC7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) /* 0xC8 - 0xCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) /* 0xD0 - 0xD7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) /* 0xD8 - 0xDF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) /* 0xE0 - 0xE7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) /* 0xE8 - 0xEF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) /* 0xF0 - 0xF7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) /* 0xF8 - 0xFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) static const struct escape escape_dd = { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) /* 0xC0 - 0xC7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) /* 0xC8 - 0xCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) /* 0xD0 - 0xD7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) /* 0xD8 - 0xDF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) /* 0xE0 - 0xE7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) /* 0xE8 - 0xEF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) /* 0xF0 - 0xF7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) /* 0xF8 - 0xFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) static const struct instr_dual instr_dual_0f_c3 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) static const struct mode_dual mode_dual_63 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) static const struct opcode opcode_table[256] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) /* 0x00 - 0x07 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) F6ALU(Lock, em_add),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) /* 0x08 - 0x0F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) F6ALU(Lock | PageTable, em_or),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) /* 0x10 - 0x17 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) F6ALU(Lock, em_adc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) /* 0x18 - 0x1F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) F6ALU(Lock, em_sbb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) /* 0x20 - 0x27 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) F6ALU(Lock | PageTable, em_and), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) /* 0x28 - 0x2F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) /* 0x30 - 0x37 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) F6ALU(Lock, em_xor), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) /* 0x38 - 0x3F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) F6ALU(NoWrite, em_cmp), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) /* 0x40 - 0x4F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) /* 0x50 - 0x57 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) X8(I(SrcReg | Stack, em_push)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) /* 0x58 - 0x5F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) X8(I(DstReg | Stack, em_pop)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) /* 0x60 - 0x67 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) I(ImplicitOps | Stack | No64, em_pusha),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) I(ImplicitOps | Stack | No64, em_popa),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) N, MD(ModRM, &mode_dual_63),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) /* 0x68 - 0x6F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) I(SrcImm | Mov | Stack, em_push),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) I(SrcImmByte | Mov | Stack, em_push),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) /* 0x70 - 0x7F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) X16(D(SrcImmByte | NearBranch)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) /* 0x80 - 0x87 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) G(ByteOp | DstMem | SrcImm, group1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) G(DstMem | SrcImm, group1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) G(ByteOp | DstMem | SrcImm | No64, group1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) G(DstMem | SrcImmByte, group1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) /* 0x88 - 0x8F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) D(ModRM | SrcMem | NoAccess | DstReg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) G(0, group1A),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) /* 0x90 - 0x97 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) /* 0x98 - 0x9F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) I(SrcImmFAddr | No64, em_call_far), N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) II(ImplicitOps | Stack, em_pushf, pushf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) II(ImplicitOps | Stack, em_popf, popf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) /* 0xA0 - 0xA7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) /* 0xA8 - 0xAF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) F2bv(DstAcc | SrcImm | NoWrite, em_test),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) I2bv(SrcAcc | DstDI | Mov | String, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) I2bv(SrcSI | DstAcc | Mov | String, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) /* 0xB0 - 0xB7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) /* 0xB8 - 0xBF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) X8(I(DstReg | SrcImm64 | Mov, em_mov)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) /* 0xC0 - 0xC7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) I(ImplicitOps | NearBranch, em_ret),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) G(ByteOp, group11), G(0, group11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) /* 0xC8 - 0xCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) I(ImplicitOps | SrcImmU16, em_ret_far_imm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) I(ImplicitOps, em_ret_far),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) D(ImplicitOps), DI(SrcImmByte, intn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) /* 0xD0 - 0xD7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) G(Src2One | ByteOp, group2), G(Src2One, group2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) G(Src2CL | ByteOp, group2), G(Src2CL, group2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) I(DstAcc | SrcImmUByte | No64, em_aam),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) I(DstAcc | SrcImmUByte | No64, em_aad),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) F(DstAcc | ByteOp | No64, em_salc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) I(DstAcc | SrcXLat | ByteOp, em_mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) /* 0xD8 - 0xDF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) /* 0xE0 - 0xE7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) X3(I(SrcImmByte | NearBranch, em_loop)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) I(SrcImmByte | NearBranch, em_jcxz),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) /* 0xE8 - 0xEF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) I(SrcImmFAddr | No64, em_jmp_far),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) D(SrcImmByte | ImplicitOps | NearBranch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) /* 0xF0 - 0xF7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) N, DI(ImplicitOps, icebp), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) G(ByteOp, group3), G(0, group3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) /* 0xF8 - 0xFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) D(ImplicitOps), D(ImplicitOps),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) };
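
/*
 * Decode example (illustrative): a guest "in al, $0x71" (opcode 0xE4)
 * lands on the I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in)
 * entry above. check_perm_in() validates the port against the TSS I/O
 * permission bitmap when IOPL doesn't allow direct access, and em_in()
 * then forwards the read to ->pio_in_emulated().
 */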
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) static const struct opcode twobyte_table[256] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) /* 0x00 - 0x0F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) G(0, group6), GD(0, &group7), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) N, I(ImplicitOps | EmulateOnUD, em_syscall),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) II(ImplicitOps | Priv, em_clts, clts), N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) /* 0x10 - 0x1F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) /* 0x20 - 0x2F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) check_cr_access),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) check_dr_write),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) /* 0x30 - 0x3F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) II(ImplicitOps | Priv, em_wrmsr, wrmsr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) II(ImplicitOps | Priv, em_rdmsr, rdmsr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) I(ImplicitOps | EmulateOnUD, em_sysenter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) /* 0x40 - 0x4F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) X16(D(DstReg | SrcMem | ModRM)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) /* 0x50 - 0x5F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) /* 0x60 - 0x6F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) /* 0x70 - 0x7F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) /* 0x80 - 0x8F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) X16(D(SrcImm | NearBranch)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) /* 0x90 - 0x9F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) /* 0xA0 - 0xA7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) II(ImplicitOps, em_cpuid, cpuid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) /* 0xA8 - 0xAF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) /* 0xB0 - 0xB7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) /* 0xB8 - 0xBF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) G(BitOp, group8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) I(DstReg | SrcMem | ModRM, em_bsf_c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) I(DstReg | SrcMem | ModRM, em_bsr_c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) /* 0xC0 - 0xC7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) N, ID(0, &instr_dual_0f_c3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) N, N, N, GD(0, &group9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) /* 0xC8 - 0xCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) X8(I(DstReg, em_bswap)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) /* 0xD0 - 0xDF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) /* 0xE0 - 0xEF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) N, N, N, N, N, N, N, N,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) /* 0xF0 - 0xFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) static const struct instr_dual instr_dual_0f_38_f0 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) I(DstReg | SrcMem | Mov, em_movbe), N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) static const struct instr_dual instr_dual_0f_38_f1 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) I(DstMem | SrcReg | Mov, em_movbe), N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) static const struct gprefix three_byte_0f_38_f0 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) ID(0, &instr_dual_0f_38_f0), N, N, N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) static const struct gprefix three_byte_0f_38_f1 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) ID(0, &instr_dual_0f_38_f1), N, N, N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)  * The insns below are selected by the mandatory (SIMD) prefix; the map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872)  * itself is indexed by the third opcode byte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) static const struct opcode opcode_map_0f_38[256] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) /* 0x00 - 0x7f */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) /* 0x80 - 0xef */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) /* 0xf0 - 0xf1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) /* 0xf2 - 0xff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) N, N, X4(N), X8(N)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) };
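
/*
 * Worked example (a sketch, not normative): for MOVBE r32, m32
 * (0F 38 F0 /r with no SIMD prefix), opcode_map_0f_38[0xf0] yields a
 * gprefix entry; the absent prefix selects ID(0, &instr_dual_0f_38_f0),
 * and the memory form (modrm.mod != 3) finally resolves to em_movbe.
 * The 0x66-prefixed (16-bit MOVBE) and 0xf2-prefixed (CRC32) forms are
 * left as N above, i.e. not emulated.
 */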
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) #undef D
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) #undef N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) #undef G
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) #undef GD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) #undef I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) #undef GP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) #undef EXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) #undef MD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) #undef ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) #undef D2bv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) #undef D2bvIP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) #undef I2bv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) #undef I2bvIP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) #undef I6ALU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901)
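/*
 * Immediate size note: 1 byte for ByteOp, otherwise the operand size,
 * except that 64-bit operations take at most a 32-bit immediate which
 * the CPU sign-extends (e.g. ADD RAX, imm32).  The one true imm64
 * case, MOV r64, imm64, is decoded via OpImm64 instead and never
 * passes through here.
 */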
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) unsigned size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) if (size == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) unsigned size, bool sign_extension)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) op->bytes = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) op->addr.mem.ea = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) /* NB. Immediates are sign-extended as necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) switch (op->bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) op->val = insn_fetch(s8, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) op->val = insn_fetch(s16, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) op->val = insn_fetch(s32, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) op->val = insn_fetch(s64, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) if (!sign_extension) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) switch (op->bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) op->val &= 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) op->val &= 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) op->val &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) }
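/*
 * No explicit goto: the insn_fetch() macro above jumps to this label
 * itself on a failed fetch, with rc already set.
 */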
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) unsigned d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) switch (d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) case OpReg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) decode_register_operand(ctxt, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) case OpImmUByte:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) rc = decode_imm(ctxt, op, 1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) case OpMem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) mem_common:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) *op = ctxt->memop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) ctxt->memopp = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) if (ctxt->d & BitOp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) fetch_bit_operand(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) op->orig_val = op->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) case OpMem64:
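/*
 * Double-width memory operand: 16 bytes for CMPXCHG16B (REX.W),
 * 8 bytes for CMPXCHG8B.
 */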
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) goto mem_common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) case OpAcc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) op->type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) fetch_register_operand(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) op->orig_val = op->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) case OpAccLo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) op->type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) fetch_register_operand(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) op->orig_val = op->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) case OpAccHi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) if (ctxt->d & ByteOp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) op->type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) op->type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) op->bytes = ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) fetch_register_operand(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) op->orig_val = op->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) case OpDI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) op->type = OP_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) op->addr.mem.ea =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) register_address(ctxt, VCPU_REGS_RDI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) op->addr.mem.seg = VCPU_SREG_ES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) op->val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) op->count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) case OpDX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) op->type = OP_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) op->bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) fetch_register_operand(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) case OpCL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) op->bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) case OpImmByte:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) rc = decode_imm(ctxt, op, 1, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) case OpOne:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) op->bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) op->val = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) case OpImm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) rc = decode_imm(ctxt, op, imm_size(ctxt), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) case OpImm64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) case OpMem8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) ctxt->memop.bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) if (ctxt->memop.type == OP_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) ctxt->memop.addr.reg = decode_register(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) ctxt->modrm_rm, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) fetch_register_operand(&ctxt->memop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) goto mem_common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) case OpMem16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) ctxt->memop.bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) goto mem_common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) case OpMem32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) ctxt->memop.bytes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) goto mem_common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) case OpImmU16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) rc = decode_imm(ctxt, op, 2, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) case OpImmU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) rc = decode_imm(ctxt, op, imm_size(ctxt), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) case OpSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) op->type = OP_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) op->addr.mem.ea =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) register_address(ctxt, VCPU_REGS_RSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) op->addr.mem.seg = ctxt->seg_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) op->val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) op->count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) case OpXLat:
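/* XLAT source: the byte at seg:[(R/E)BX + AL]. */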
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) op->type = OP_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) op->addr.mem.ea =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) address_mask(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) reg_read(ctxt, VCPU_REGS_RBX) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) op->addr.mem.seg = ctxt->seg_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) op->val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) case OpImmFAddr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) op->addr.mem.ea = ctxt->_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) op->bytes = ctxt->op_bytes + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) insn_fetch_arr(op->valptr, op->bytes, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) case OpMemFAddr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) ctxt->memop.bytes = ctxt->op_bytes + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) goto mem_common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) case OpES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) op->val = VCPU_SREG_ES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) case OpCS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) op->val = VCPU_SREG_CS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) case OpSS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) op->val = VCPU_SREG_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) case OpDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) op->val = VCPU_SREG_DS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) case OpFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) op->val = VCPU_SREG_FS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) case OpGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) op->type = OP_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) op->val = VCPU_SREG_GS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) case OpImplicit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) /* Special instructions do their own operand decoding. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) op->type = OP_NONE; /* Disable writeback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) int mode = ctxt->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) bool op_prefix = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) bool has_seg_override = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) struct opcode opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) u16 dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) struct desc_struct desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) ctxt->memop.type = OP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) ctxt->memopp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) ctxt->_eip = ctxt->eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) ctxt->fetch.ptr = ctxt->fetch.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) ctxt->fetch.end = ctxt->fetch.data + insn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) ctxt->opcode_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) ctxt->intercept = x86_intercept_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) if (insn_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) memcpy(ctxt->fetch.data, insn, insn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) rc = __do_insn_fetch_bytes(ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) case X86EMUL_MODE_REAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) case X86EMUL_MODE_VM86:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) def_op_bytes = def_ad_bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) if (desc.d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) def_op_bytes = def_ad_bytes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) case X86EMUL_MODE_PROT16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) def_op_bytes = def_ad_bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) case X86EMUL_MODE_PROT32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) def_op_bytes = def_ad_bytes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) case X86EMUL_MODE_PROT64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) def_op_bytes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) def_ad_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) return EMULATION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) ctxt->op_bytes = def_op_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) ctxt->ad_bytes = def_ad_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170)
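/*
 * A note on the XOR trick below: with defaults of 2 or 4, "^ 6"
 * toggles 2 <-> 4 (2 ^ 6 = 4, 4 ^ 6 = 2); with defaults of 4 or 8,
 * "^ 12" toggles 4 <-> 8 (4 ^ 12 = 8, 8 ^ 12 = 4).
 */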
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) /* Legacy prefixes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) switch (ctxt->b = insn_fetch(u8, ctxt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) case 0x66: /* operand-size override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) op_prefix = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) /* switch between 2/4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) ctxt->op_bytes = def_op_bytes ^ 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) case 0x67: /* address-size override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) if (mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) /* switch between 4/8 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) ctxt->ad_bytes = def_ad_bytes ^ 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) /* switch between 2/4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) ctxt->ad_bytes = def_ad_bytes ^ 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) case 0x26: /* ES override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) has_seg_override = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) ctxt->seg_override = VCPU_SREG_ES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) case 0x2e: /* CS override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) has_seg_override = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) ctxt->seg_override = VCPU_SREG_CS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) case 0x36: /* SS override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) has_seg_override = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) ctxt->seg_override = VCPU_SREG_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) case 0x3e: /* DS override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) has_seg_override = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) ctxt->seg_override = VCPU_SREG_DS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) case 0x64: /* FS override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) has_seg_override = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) ctxt->seg_override = VCPU_SREG_FS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) case 0x65: /* GS override */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) has_seg_override = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) ctxt->seg_override = VCPU_SREG_GS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) case 0x40 ... 0x4f: /* REX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) if (mode != X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) goto done_prefixes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) ctxt->rex_prefix = ctxt->b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) case 0xf0: /* LOCK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) ctxt->lock_prefix = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) case 0xf2: /* REPNE/REPNZ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) case 0xf3: /* REP/REPE/REPZ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) ctxt->rep_prefix = ctxt->b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) goto done_prefixes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) /* Any legacy prefix after a REX prefix nullifies its effect. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) ctxt->rex_prefix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) done_prefixes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) /* REX prefix. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) if (ctxt->rex_prefix & 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) ctxt->op_bytes = 8; /* REX.W */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) /* Opcode byte(s). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) opcode = opcode_table[ctxt->b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) /* Two-byte opcode? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) if (ctxt->b == 0x0f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) ctxt->opcode_len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) ctxt->b = insn_fetch(u8, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) opcode = twobyte_table[ctxt->b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) /* 0F_38 opcode map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) if (ctxt->b == 0x38) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) ctxt->opcode_len = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) ctxt->b = insn_fetch(u8, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) opcode = opcode_map_0f_38[ctxt->b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) ctxt->d = opcode.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) if (ctxt->d & ModRM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) ctxt->modrm = insn_fetch(u8, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) /* vex-prefix instructions are not implemented; outside 64-bit mode, 0xc4/0xc5 with modrm.mod != 3 decode as LES/LDS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) ctxt->d = NotImpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263)
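/*
 * Resolve group/extension encodings.  Worked example (a sketch):
 * opcode 0xf7 is G(0, group3); with modrm == 0xd8, goffset =
 * (0xd8 >> 3) & 7 == 3, which selects the NEG entry of group3
 * (defined earlier in this file).
 */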
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) while (ctxt->d & GroupMask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) switch (ctxt->d & GroupMask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) case Group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) goffset = (ctxt->modrm >> 3) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) opcode = opcode.u.group[goffset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) case GroupDual:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) goffset = (ctxt->modrm >> 3) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) if ((ctxt->modrm >> 6) == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) opcode = opcode.u.gdual->mod3[goffset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) opcode = opcode.u.gdual->mod012[goffset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) case RMExt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) goffset = ctxt->modrm & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) opcode = opcode.u.group[goffset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) case Prefix:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) if (ctxt->rep_prefix && op_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) return EMULATION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) switch (simd_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) case Escape:
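/*
 * x87 escape: modrm >= 0xc0 indexes the 64 register-form entries
 * (e.g. d9 e8, FLD1, maps to escape_d9's high[0x28]); otherwise
 * the reg field selects one of the 8 memory-form ops.
 */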
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) if (ctxt->modrm > 0xbf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) size_t size = ARRAY_SIZE(opcode.u.esc->high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) u32 index = array_index_nospec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) ctxt->modrm - 0xc0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) opcode = opcode.u.esc->high[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) case InstrDual:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) if ((ctxt->modrm >> 6) == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) opcode = opcode.u.idual->mod3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) opcode = opcode.u.idual->mod012;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) case ModeDual:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) if (ctxt->mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) opcode = opcode.u.mdual->mode64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) opcode = opcode.u.mdual->mode32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) return EMULATION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) ctxt->d &= ~(u64)GroupMask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) ctxt->d |= opcode.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) /* Unrecognised? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) if (ctxt->d == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) return EMULATION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) ctxt->execute = opcode.u.execute;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) return EMULATION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) if (unlikely(ctxt->d &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) No16))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336)  * ctxt->check_perm and ctxt->intercept are copied unconditionally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337)  * here and checked unconditionally in x86_emulate_insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) ctxt->check_perm = opcode.check_perm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) ctxt->intercept = opcode.intercept;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) if (ctxt->d & NotImpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) return EMULATION_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) if (mode == X86EMUL_MODE_PROT64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) ctxt->op_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) else if (ctxt->d & NearBranch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) ctxt->op_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) if (ctxt->d & Op3264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) if (mode == X86EMUL_MODE_PROT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) ctxt->op_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) ctxt->op_bytes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) if ((ctxt->d & No16) && ctxt->op_bytes == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) ctxt->op_bytes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) if (ctxt->d & Sse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) ctxt->op_bytes = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) else if (ctxt->d & Mmx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) ctxt->op_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) /* ModRM and SIB bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) if (ctxt->d & ModRM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) rc = decode_modrm(ctxt, &ctxt->memop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) if (!has_seg_override) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) has_seg_override = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) ctxt->seg_override = ctxt->modrm_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) } else if (ctxt->d & MemAbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) rc = decode_abs(ctxt, &ctxt->memop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) if (!has_seg_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) ctxt->seg_override = VCPU_SREG_DS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) ctxt->memop.addr.mem.seg = ctxt->seg_override;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) * Decode and fetch the source operand: register, memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) * or immediate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) * Decode and fetch the second source operand: register, memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) * or immediate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) /* Decode and fetch the destination operand: register or memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) if (ctxt->rip_relative && likely(ctxt->memopp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) ctxt->memopp->addr.mem.ea = address_mask(ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) ctxt->memopp->addr.mem.ea + ctxt->_eip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) if (rc == X86EMUL_PROPAGATE_FAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) ctxt->have_exception = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) return ctxt->d & PageTable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422)  * The second termination condition applies only to REPE and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423)  * REPNE.  If the repeat-string-operation prefix is REPE/REPZ or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)  * REPNE/REPNZ, check the corresponding termination condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425)  * - if REPE/REPZ and ZF = 0 then done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426)  * - if REPNE/REPNZ and ZF = 1 then done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||	/* CMPS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429)      (ctxt->b == 0xae) || (ctxt->b == 0xaf))	/* SCAS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) && (((ctxt->rep_prefix == REPE_PREFIX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) || ((ctxt->rep_prefix == REPNE_PREFIX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442)
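/*
 * fwait forces delivery of any pending x87 exception; asm_safe()
 * catches the resulting #MF so it can be reflected into the guest
 * rather than taken by the host.
 */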
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) emulator_get_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) rc = asm_safe("fwait");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) emulator_put_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) if (unlikely(rc != X86EMUL_CONTINUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) return emulate_exception(ctxt, MF_VECTOR, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) static void fetch_possible_mmx_operand(struct operand *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) if (op->type == OP_MM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) read_mmx_reg(&op->mm_val, op->addr.mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458)
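/*
 * A rough sketch of the dispatch below: each fastop is a table of
 * FASTOP_SIZE-d stubs laid out byte/word/dword/qword, so a non-byte
 * op picks its size variant by offset, e.g. 4-byte operands give
 * __ffs(4) == 2, i.e. fop + 2 * FASTOP_SIZE.
 */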
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) if (!(ctxt->d & ByteOp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) : "c"(ctxt->src2.val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) if (!fop) /* exception is returned in fop variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) return emulate_de(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) return X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) void init_decode_cache(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) {
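/*
 * Clear everything from rip_relative up to (but not including) modrm
 * in one go; this relies on the field ordering in struct
 * x86_emulate_ctxt.
 */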
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) memset(&ctxt->rip_relative, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) ctxt->io_read.pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) ctxt->io_read.end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) ctxt->mem_read.end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) const struct x86_emulate_ops *ops = ctxt->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) int rc = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) int saved_dst_type = ctxt->dst.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) unsigned emul_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) ctxt->mem_read.pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) /* LOCK prefix is allowed only with some instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) rc = emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) rc = emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) emul_flags = ctxt->ops->get_hflags(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) if (unlikely(ctxt->d &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) (ctxt->d & Undefined)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) rc = emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) rc = emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) rc = emulate_nm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) if (ctxt->d & Mmx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) rc = flush_pending_x87_faults(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532)  * Now that we know the FPU is exception-safe, we can fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) * operands from it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) fetch_possible_mmx_operand(&ctxt->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) fetch_possible_mmx_operand(&ctxt->src2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) if (!(ctxt->d & Mov))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) fetch_possible_mmx_operand(&ctxt->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) rc = emulator_check_intercept(ctxt, ctxt->intercept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) X86_ICPT_PRE_EXCEPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) /* Instruction can only be executed in protected mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) rc = emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) /* Privileged instructions can be executed only at CPL 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) if (ctxt->d & PrivUD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) rc = emulate_ud(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) rc = emulate_gp(ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) /* Do instruction specific permission checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) if (ctxt->d & CheckPerm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) rc = ctxt->check_perm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) if (rc != X86EMUL_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) }

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
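
		/*
		 * Example: REP MOVSB with (E/R)CX == 0 performs no iteration
		 * at all; it just advances RIP past the instruction.
		 * address_mask() honours the address-size attribute, so with
		 * a 16-bit address size only CX is tested, with 32-bit only
		 * ECX, and in 64-bit mode the full RCX.
		 */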
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}
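
	/*
	 * NoAccess marks instructions that decode a memory operand but must
	 * not dereference it; LEA is the canonical case, handled below by
	 * copying src.addr.mem.ea rather than a value read from memory.
	 */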

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;
	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* Optimisation: skip the slow emulated read when the
		 * destination is write-only (Mov). */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;
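
	/*
	 * Read-modify-write destinations (e.g. ADD r/m32, r32) need the
	 * current memory value fetched above; pure stores don't.  The
	 * orig_val64 copy lets the later cmpxchg-style writeback compare
	 * against the complete original quadword.
	 */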

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;
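
	/*
	 * RF suppresses instruction breakpoints.  Keeping it set while a
	 * REP iteration is in flight prevents a spurious #DB when the
	 * partially completed string instruction is restarted; for anything
	 * else RF is cleared, as it would be after a real execution.
	 */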

	if (ctxt->execute) {
		if (ctxt->d & Fastop)
			rc = fastop(ctxt, ctxt->fop);
		else
			rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}
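
	/*
	 * Fastop handlers are tiny assembly stubs that execute the real
	 * instruction on host registers (roughly: dst in RAX, src in RDX,
	 * src2 in RCX, with flags passed in and out via RFLAGS), so ALU
	 * flag computation doesn't have to be re-implemented in C.
	 */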

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
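	/*
	 * Worked example: with a 32-bit operand size this is CWDE, so
	 * AX = 0x8000 sign-extends through the (s16) cast to
	 * EAX = 0xffff8000; with a 16-bit operand size (CBW), AL = 0x90
	 * becomes AX = 0xff90.
	 */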
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
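
	/*
	 * NoWrite covers instructions such as CMP and TEST that compute
	 * flags but leave the destination untouched; skipping writeback
	 * here is what keeps them architecturally side-effect free.
	 */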

	/*
	 * Restore dst type in case the decode is reused
	 * (this happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
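
		/*
		 * count can exceed 1 when the PIO read-ahead cache satisfies
		 * several iterations in one pass, which is why RCX is
		 * decremented by count rather than by a fixed 1.
		 */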

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the PIO read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This is usually done
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
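
		/*
		 * Worked example: REP OUTSB with RCX = 2500 and no
		 * read-ahead in use keeps returning EMULATION_RESTART until
		 * RCX reaches 2048 (a multiple of 1024, so RCX & 0x3ff == 0),
		 * at which point control goes back to the guest so pending
		 * events can be injected before emulation resumes.
		 */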
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		ctxt->eip = (u32)ctxt->_eip;
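
	/*
	 * _eip is carried as a 64-bit value internally; outside 64-bit mode
	 * the architectural instruction pointer is at most 32 bits wide, so
	 * truncate before committing it.
	 */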

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
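
	/*
	 * Note that a propagated fault still returns EMULATION_OK: the
	 * instruction was handled, and the exception to inject has been
	 * recorded in ctxt->exception with have_exception set.  Only
	 * X86EMUL_UNHANDLEABLE maps to EMULATION_FAILED.
	 */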

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		ctxt->ops->wbinvd(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
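	/*
	 * The op_bytes != 4 special case preserves a subtle architectural
	 * detail: in 64-bit mode a CMOV with a 32-bit operand zero-extends
	 * into the upper half of the destination register even when the
	 * condition is false, so writeback must still happen for the
	 * 4-byte case.
	 */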
	case 0x80 ... 0x8f: /* jcc rel (jo, jno, jb, ...) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
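	/*
	 * Worked example: MOVSX r32, r/m8 with a source byte of 0x80 yields
	 * 0xffffff80, while MOVZX of the same byte yields 0x00000080; the
	 * (s8)/(u8) casts above are what implement that difference.
	 */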
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

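/*
 * A cached guest physical address is only reusable when the instruction
 * touches a single, fixed linear address.  REP-string instructions advance
 * RSI/RDI every iteration, and TwoMemOp instructions such as MOVS access
 * two memory operands, so both cases must fall back to a fresh walk of the
 * guest page tables.
 */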
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}