^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * x86 single-step support code, common to 32-bit and 64-bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/desc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
/*
 * Convert the instruction pointer in @regs into a linear address by
 * adding the base of the code segment it executes under.  Used by the
 * single-step code to read the opcode bytes the task is about to run.
 *
 * Returns the linear address, or -1L if the selector is bogus (an
 * access through it would fault anyway).
 */
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs;
	/* vm86 mode: real-mode addressing, linear = (CS << 4) + IP(16-bit). */
	if (v8086_mode(regs)) {
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		/* Strip the RPL/TI bits to get the LDT entry index. */
		seg >>= 3;

		/*
		 * context.lock keeps the LDT from being freed or resized
		 * while we look at the entry.
		 */
		mutex_lock(&child->mm->context.lock);
		if (unlikely(!child->mm->context.ldt ||
			     seg >= child->mm->context.ldt->nr_entries))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = &child->mm->context.ldt->entries[seg];
			base = get_desc_base(desc);

			/* 16-bit code segment? */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}
#endif

	return addr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
/*
 * Return 1 if the next user instruction at @regs->ip is one that can
 * itself set EFLAGS.TF (popf or iret, possibly behind prefixes), so the
 * caller knows not to claim ownership of TF via TIF_FORCED_TF.
 *
 * Decodes at most 15 bytes (the maximum x86 instruction length),
 * skipping over prefix bytes until a real opcode is found.
 */
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];
	unsigned long addr = convert_ip_to_linear(child, regs);

	/* May copy fewer than sizeof(opcode) bytes; the loop honors that. */
	copied = access_process_vm(child, addr, opcode, sizeof(opcode),
			FOLL_FORCE);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;

			/* CHECKME: 64 65 */

		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

#ifdef CONFIG_X86_64
		case 0x40 ... 0x4f:
			if (!user_64bit_mode(regs))
				/* 32-bit mode: register increment */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;
#endif

			/* CHECKME: f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	/* Couldn't read (or decode) a full instruction: assume not TF-setting. */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * Enable single-stepping. Return nonzero if user mode is not using TF itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) */
/*
 * Enable single-stepping. Return nonzero if user mode is not using TF itself.
 *
 * A nonzero return tells the caller that TF is "ours" (kernel-forced),
 * which is the precondition for attempting block stepping on top of it.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state so we don't wrongly set TIF_FORCED_TF below.
	 * If enable_single_step() was used last and that is what
	 * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
	 * already set and our bookkeeping is fine.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* Snapshot flags *before* we force TF, to tell who owned TF. */
	oflags = regs->flags;

	/* Set TF on the kernel stack.. */
	regs->flags |= X86_EFLAGS_TF;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * Note that if we don't actually execute the popf because
	 * of a signal arriving right now or suchlike, we will lose
	 * track of the fact that it really was "us" that set it.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}

	/*
	 * If TF was already set, check whether it was us who set it.
	 * If not, we should never attempt a block step.
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);

	/* TF was clear before: it is now kernel-owned. */
	set_tsk_thread_flag(child, TIF_FORCED_TF);

	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
/*
 * Turn block stepping (branch trap flag, DEBUGCTLMSR.BTF) on or off for
 * @task, keeping TIF_BLOCKSTEP and the MSR in sync.
 *
 * The MSR itself is only written when @task is current; otherwise
 * __switch_to_xtra() will propagate TIF_BLOCKSTEP on the next switch-in.
 */
void set_task_blockstep(struct task_struct *task, bool on)
{
	unsigned long debugctl;

	/*
	 * Ensure irq/preemption can't change debugctl in between.
	 * Note also that both TIF_BLOCKSTEP and debugctl should
	 * be changed atomically wrt preemption.
	 *
	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
	 * task is current or it can't be running, otherwise we can race
	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
	 * PTRACE_KILL is not safe.
	 */
	local_irq_disable();
	debugctl = get_debugctlmsr();
	if (on) {
		debugctl |= DEBUGCTLMSR_BTF;
		set_tsk_thread_flag(task, TIF_BLOCKSTEP);
	} else {
		debugctl &= ~DEBUGCTLMSR_BTF;
		clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	}
	if (task == current)
		update_debugctlmsr(debugctl);
	local_irq_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) * Enable single or block step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static void enable_step(struct task_struct *child, bool block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * Make sure block stepping (BTF) is not enabled unless it should be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * Note that we don't try to worry about any is_setting_trap_flag()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * instructions after the first when using block stepping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * So no one should try to use debugger block stepping in a program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * that uses user-mode single stepping itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) if (enable_single_step(child) && block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) set_task_blockstep(child, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) set_task_blockstep(child, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) void user_enable_single_step(struct task_struct *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) enable_step(child, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) void user_enable_block_step(struct task_struct *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) enable_step(child, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
/*
 * ptrace hook: disable single/block stepping for @child.
 *
 * Clears TIF_BLOCKSTEP (and BTF), always clears TIF_SINGLESTEP, but
 * only clears EFLAGS.TF when it was the kernel that set it
 * (TIF_FORCED_TF) - user-set TF is left alone.
 */
void user_disable_single_step(struct task_struct *child)
{
	/*
	 * Make sure block stepping (BTF) is disabled.
	 */
	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);

	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}