// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_lock
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable_areas.h>

#include <xen/xen.h>

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

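/*
 * Kernel virtual address of the PTI alias for the LDT in the given slot
 * (0 or 1) within the LDT_BASE_ADDR area.
 */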
static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}

void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active. The LDT will not be freed until
	 * after the IPI is handled by all such CPUs. This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
}

void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT. Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed. There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode. Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);

	DEBUG_LOCKS_WARN_ON(preemptible());
}

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION

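/*
 * Warn if the LDT page-table state does not match expectations: an mm that
 * already has an LDT must already have the kernel (and, with PTI, user)
 * top-level entries populated; an mm mapping its first LDT must have neither.
 */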
static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}

#ifdef CONFIG_X86_PAE

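/*
 * Walk from a pgd entry down to the pmd covering @va, returning NULL if any
 * intermediate level is not present.
 */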
static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

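/*
 * Copy the kernel pmd entry covering LDT_BASE_ADDR into the user page tables
 * when the first LDT for this mm is mapped (PTI only).
 */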
static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	bool had_kernel, had_user;
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
	had_kernel = (k_pmd->pmd != 0);
	had_user   = (u_pmd->pmd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	int i, nr_pages;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy to find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	ldt->slot = slot;
	return 0;
}

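/*
 * Tear down the PTI alias of an LDT that is being replaced: clear every PTE
 * in the alias range and flush the TLB for it.
 */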
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

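/*
 * Free the page tables backing the PTI LDT alias area of @mm. The LDT
 * entries themselves are freed separately by free_ldt_struct().
 */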
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

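/*
 * Publish a new LDT for @mm and make every CPU currently running this mm
 * reload it, serialized against other installs by context.lock.
 */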
static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

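/*
 * Free an ldt_struct and its entry array, undoing the allocation scheme used
 * by alloc_ldt_struct() (vmalloc for multi-page LDTs, a single page otherwise).
 */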
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

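/* Drop the PTI LDT page tables when the address space is torn down. */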
void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

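/*
 * Copy the current LDT to userspace (modify_ldt() func 0). The buffer beyond
 * the installed entries is zero-filled; returns the number of bytes provided,
 * or 0 if no LDT is installed.
 */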
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

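/*
 * modify_ldt() func 2: the default LDT is empty, so just zero the user
 * buffer (at most a small fixed per-arch size).
 */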
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static bool allow_16bit_segments(void)
{
	if (!IS_ENABLED(CONFIG_X86_16BIT))
		return false;

#ifdef CONFIG_XEN_PV
	/*
	 * Xen PV does not implement ESPFIX64, which means that 16-bit
	 * segments will not work correctly. Until either Xen PV implements
	 * ESPFIX64 and can signal this fact to the guest or unless someone
	 * provides compelling evidence that allowing broken 16-bit segments
	 * is worthwhile, disallow 16-bit segments under Xen PV.
	 */
	if (xen_pv_domain()) {
		pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
		return false;
	}
#endif

	return true;
}

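/*
 * modify_ldt() func 1/0x11: validate the user_desc, build a new ldt_struct
 * containing the old entries plus the updated one, map it for PTI and
 * install it, then free the old LDT.
 */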
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!ldt_info.seg_32bit && !allow_16bit_segments()) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This only can fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}