/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

/*
 * host_supports_tls starts at -1 so that, if needed, we can detect when
 * it is still uninitialized.
 *
 * Both variables are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, index = %d\n",
		       ret, info->entry_number);

	return ret;
}

int do_get_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, index = %d\n",
		       ret, info->entry_number);

	return ret;
}

/*
 * Find a yet-unused TLS descriptor index, for sys_set_thread_area.
 * XXX: Consider leaving one slot free for glibc usage in the first place.
 * This must be done here (by changing the GDT_ENTRY_TLS_* macros) and
 * nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic
 * linking and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}
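
/*
 * Note: userspace normally reaches get_free_idx() by passing
 * entry_number == -1 to set_thread_area(); sys_set_thread_area() below
 * then reports the chosen slot back through the user_desc.
 */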

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Compare LDT_empty() or the i386 sys_get_thread_area() code: we
	 * indeed obtain an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}
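
/*
 * For reference, a sketch of what the i386 LDT_empty() check amounts to
 * (the exact macro lives in the host-arch headers and may differ in
 * detail): every field of the user_desc is zero except
 * read_exec_only == 1 and seg_not_present == 1, which is precisely the
 * state clear_user_desc() establishes above.
 */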

#define O_FORCE 1

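/*
 * O_FORCE makes load_TLS() reload even entries that have already been
 * flushed to the host; arch_switch_tls() passes it on context switch,
 * since the host process's TLS descriptors at that point still hold the
 * previous task's values.
 */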
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * If the entry is not present and not yet flushed, clear
		 * it and flush it to the host, which clears it there too.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}
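
/*
 * In this file load_TLS() is called only from arch_switch_tls(), with
 * O_FORCE set; a non-forced call would only push entries that have not
 * been flushed yet, exactly the condition needs_TLS_update() below
 * detects.
 */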

/*
 * Check whether we need to flush TLS entries for the new process: any
 * descriptor that has not been flushed to the host needs it, whether it
 * is present or not.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Can't test curr->present, we may need to clear a
		 * descriptor which had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed.
 * So we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Skipping non-present entries is still correct: if an
		 * entry was never present on the host, it remains as
		 * flushed as it was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/*
 * In SKAS0 mode, multiple guest threads sharing the same ->mm currently
 * run inside a common host process, so this is needed in SKAS0 too.
 *
 * However, if each thread had its own host process (as was discussed for
 * SMP support), this would not be needed.
 *
 * Nor will it be needed when (and if) we add support for the host SKAS
 * patch.
 */

int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads;
	 * beyond that, doing so would also mean calling
	 * os_set_thread_area() with userspace_pid[cpu] == 0, which gives
	 * an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}
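
/*
 * On 32-bit UML, arch_switch_tls() is expected to be invoked from the
 * arch switch hook on every context switch; a failure of the deferred
 * load here can only be logged by do_set_thread_area(), not recovered.
 */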

static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

int arch_set_tls(struct task_struct *new, unsigned long tls)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
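
/*
 * arch_set_tls() runs on the clone() path: the child's initial TLS comes
 * from the user_desc passed through the tls argument of a CLONE_SETTLS
 * clone. A hypothetical 32-bit caller might do:
 *
 *	struct user_desc desc = {
 *		.entry_number = 6,	(GDT_ENTRY_TLS_MIN on i386)
 *		.base_addr = (unsigned long) tls_block,
 *		.limit = 0xfffff,
 *		.seg_32bit = 1,
 *		.limit_in_pages = 1,
 *		.useable = 1,
 *	};
 *	clone(fn, stack_top, CLONE_VM | CLONE_SETTLS | SIGCHLD, arg,
 *	      NULL, &desc, NULL);
 *
 * fn, stack_top, arg and tls_block are placeholders, not part of this
 * file.
 */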

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here without flushed TLS.\n",
		       current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values returned to
	 * userspace from the tls_array are 0 (because it is cleared at
	 * boot, see arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
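
/*
 * A typical (hypothetical) userspace call asks for a free slot with
 * entry_number == -1 and reads the chosen slot back:
 *
 *	struct user_desc u = {
 *		.entry_number = -1,
 *		.base_addr = (unsigned long) tls_block,
 *		.limit = 0xfffff,
 *		.seg_32bit = 1,
 *		.limit_in_pages = 1,
 *		.useable = 1,
 *	};
 *	if (syscall(__NR_set_thread_area, &u) == 0)
 *		use_gdt_slot(u.entry_number);
 *
 * tls_block and use_gdt_slot() are placeholders, not part of this file.
 */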

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}
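
/*
 * A tracer would reach this via, for example (a hypothetical sketch):
 *
 *	struct user_desc u;
 *	ptrace(PTRACE_GET_THREAD_AREA, pid, idx, &u);
 *
 * where idx names the GDT TLS slot to read.
 */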

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT
 * indexes if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES - 1);
	} else {
		printk(KERN_ERR "Host TLS support NOT detected! TLS support inside UML will not work\n");
	}
	return 0;
}

__initcall(__setup_host_supports_tls);