// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

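/*
 * A host_vm_change collects pending host mmap/munmap/mprotect requests
 * so that adjacent operations can be merged and then issued to the host
 * (or to the userspace stub) in a single batch by do_ops().
 */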
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int userspace;
	int index;
	struct mm_struct *mm;
	void *data;
	int force;
};

#define INIT_HVC(mm, force, userspace) \
	((struct host_vm_change) \
	 { .ops = { { .type = NONE } }, \
	   .mm = mm, \
	   .data = NULL, \
	   .userspace = userspace, \
	   .index = 0, \
	   .force = force })

static void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}

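/*
 * Issue the first 'end' queued operations to the host, either through
 * the userspace stub (hvc->userspace) or directly for kernel mappings.
 * 'finished' is passed through to mark the final batch.
 */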
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			if (hvc->userspace)
				ret = map(&hvc->mm->context.id, op->u.mmap.addr,
					  op->u.mmap.len, op->u.mmap.prot,
					  op->u.mmap.fd,
					  op->u.mmap.offset, finished,
					  &hvc->data);
			else
				map_memory(op->u.mmap.addr, op->u.mmap.offset,
					   op->u.mmap.len, 1, 1, 1);
			break;
		case MUNMAP:
			if (hvc->userspace)
				ret = unmap(&hvc->mm->context.id,
					    op->u.munmap.addr,
					    op->u.munmap.len, finished,
					    &hvc->data);
			else
				ret = os_unmap_memory(
					(void *) op->u.munmap.addr,
					op->u.munmap.len);
			break;
		case MPROTECT:
			if (hvc->userspace)
				ret = protect(&hvc->mm->context.id,
					      op->u.mprotect.addr,
					      op->u.mprotect.len,
					      op->u.mprotect.prot,
					      finished, &hvc->data);
			else
				ret = os_protect_memory(
					(void *) op->u.mprotect.addr,
					op->u.mprotect.len,
					1, 1, 1);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	if (ret == -ENOMEM)
		report_enomem();

	return ret;
}

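/*
 * Queue an mmap of [virt, virt+len) backed by the physical memory at
 * 'phys'.  A request that directly extends the previous MMAP entry is
 * merged into it; when the queue is full it is flushed via do_ops()
 * before the new entry is added.
 */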
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd = -1, ret = 0;

	if (virt + len > STUB_START && virt < STUB_END)
		return -EINVAL;

	if (hvc->userspace)
		fd = phys_mapping(phys, &offset);
	else
		offset = phys;
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MMAP,
				    .u = { .mmap = { .addr = virt,
						     .len = len,
						     .prot = prot,
						     .fd = fd,
						     .offset = offset } } });
	return ret;
}

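/*
 * Queue an munmap of [addr, addr+len), merging it into a directly
 * preceding MUNMAP entry when possible.
 */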
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (addr + len > STUB_START && addr < STUB_END)
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MUNMAP,
				    .u = { .munmap = { .addr = addr,
						       .len = len } } });
	return ret;
}

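/*
 * Queue an mprotect of [addr, addr+len), merging it into a directly
 * preceding MPROTECT entry with the same protection when possible.
 */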
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (addr + len > STUB_START && addr < STUB_END)
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MPROTECT,
				    .u = { .mprotect = { .addr = addr,
							 .len = len,
							 .prot = prot } } });
	return ret;
}

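/* Advance 'n' to the next 'inc'-aligned boundary; 'inc' must be a power of two. */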
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

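/*
 * Walk the PTEs in [addr, end) and queue whatever host operations are
 * needed to bring the host mappings back in sync.  Accessed and dirty
 * bits are emulated here: a page that is not marked young is mapped
 * with no access so the next reference faults, and a clean page is
 * mapped read-only so the next write faults.
 */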
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte)) {
				if (pte_newpage(*pte))
					ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
						       PAGE_SIZE, prot, hvc);
			} else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (hvc->force || p4d_newpage(*p4d)) {
				ret = add_munmap(addr, next - addr, hvc);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, hvc);
	} while (p4d++, addr = next, ((addr < end) && !ret));
	return ret;
}

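/*
 * Sync the host mappings for a range of a userspace address space.  If
 * anything fails, the mm is flagged for killing, since the host and
 * guest page tables can no longer be assumed to agree.
 */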
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0, userspace = 1;

	hvc = INIT_HVC(mm, force, userspace);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		struct mm_id *mm_idp = &current->mm->context.id;

		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		mm_idp->kill = 1;
	}
}

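/*
 * Kernel-side flush: walk init_mm's page tables over [start, end),
 * unmapping ranges covered by absent page-table entries that are marked
 * as new, and remapping or reprotecting present pages as needed.
 * Returns nonzero if any host mapping was changed.
 */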
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err = 0, force = 0, userspace = 0;
	struct host_vm_change hvc;

	mm = &init_mm;
	hvc = INIT_HVC(mm, force, userspace);
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d)) {
			last = ADD_ROUND(addr, P4D_SIZE);
			if (last > end)
				last = end;
			if (p4d_newpage(*p4d)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(p4d, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = add_munmap(addr, PAGE_SIZE, &hvc);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, 0, &hvc);
		} else if (pte_newprot(*pte)) {
			updated = 1;
			err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
		}
		addr += PAGE_SIZE;
	}
	if (!err)
		err = do_ops(&hvc, hvc.index, 1);

	if (err < 0)
		panic("flush_tlb_kernel failed, errno = %d\n", err);
	return updated;
}

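/*
 * Flush a single userspace page: recompute its host protection from the
 * young/dirty bits and issue the matching map, unmap or protect call
 * directly.  On failure the task is killed, since its host address
 * space is no longer consistent.
 */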
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;

	if (address >= STUB_START && address < STUB_END)
		goto kill;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto kill;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		} else
			err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	} else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err) {
		if (err == -ENOMEM)
			report_enomem();

		goto kill;
	}

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL);
}

void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else
		fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	fix_range(mm, start, end, 0);
}

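/* Walk every VMA in the address space and sync its host mappings. */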
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}

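/* Force a full resync of the host mappings for the current address space. */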
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}