// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
        long __d0;
        might_fault();
        /*
         * No "memory" clobber is needed: the asm only writes to user
         * memory, so it does not change any memory GCC knows about.
         */
        stac();
        asm volatile(
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
                "       .align 16\n"
                "0:     movq   $0,(%[dst])\n"
                "       addq   $8,%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
                "4:     movq   %[size1],%%rcx\n"
                "       testl  %%ecx,%%ecx\n"
                "       jz     2f\n"
                "1:     movb   $0,(%[dst])\n"
                "       incq   %[dst]\n"
                "       decl %%ecx ; jnz  1b\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     lea 0(%[size1],%[size8],8),%[size8]\n"
                "       jmp 2b\n"
                ".previous\n"
                _ASM_EXTABLE_UA(0b, 3b)
                _ASM_EXTABLE_UA(1b, 2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
                : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
        clac();
        return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n))
                return __clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);
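
/*
 * Illustrative sketch, not part of this file: a typical caller passes a
 * user pointer and length to clear_user() and treats a non-zero return
 * value (the number of bytes that could not be cleared) as a fault. The
 * function name below is hypothetical.
 */
#if 0
static int example_zero_user_buffer(void __user *ubuf, unsigned long len)
{
        /* clear_user() performs the access_ok() check itself */
        if (clear_user(ubuf, len))
                return -EFAULT;
        return 0;
}
#endif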

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vend = addr + size;
        void *p;

        for (p = (void *)((unsigned long)addr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
        clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
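
/*
 * Illustrative sketch, not part of this file: clean_cache_range() aligns
 * the start address down to a cache line boundary and steps in
 * x86_clflush_size increments, so a write-back covers every cache line
 * that overlaps the requested range. The helper name below is
 * hypothetical.
 */
#if 0
static void example_wb_buffer(void *buf, size_t len)
{
        /* write back all cache lines overlapping buf[0..len) */
        arch_wb_cache_pmem(buf, len);
}
#endif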

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        unsigned long flushed, dest = (unsigned long) dst;
        long rc = __copy_user_nocache(dst, src, size, 0);

        /*
         * __copy_user_nocache() uses non-temporal stores for the bulk of
         * the transfer, but we need to manually flush if the transfer is
         * unaligned: a cached memory copy is used when the destination or
         * the size is not naturally aligned, that is:
         * - 8-byte alignment is required when the size is 8 bytes or larger,
         * - 4-byte alignment is required when the size is 4 bytes.
         */
        if (size < 8) {
                if (!IS_ALIGNED(dest, 4) || size != 4)
                        clean_cache_range(dst, size);
        } else {
                if (!IS_ALIGNED(dest, 8)) {
                        dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
                        clean_cache_range(dst, 1);
                }

                flushed = dest - (unsigned long) dst;
                if (size > flushed && !IS_ALIGNED(size - flushed, 8))
                        clean_cache_range(dst + size - 1, 1);
        }

        return rc;
}
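
/*
 * Illustrative sketch, not part of this file: a caller copying user data
 * straight into a persistent-memory buffer. A non-zero return from
 * __copy_user_flushcache() is assumed here to be the number of bytes not
 * copied, following the usual copy_from_user() convention. The function
 * name below is hypothetical.
 */
#if 0
static int example_copy_from_user_to_pmem(void *pmem_dst,
                                          const void __user *usrc,
                                          unsigned int len)
{
        if (!access_ok(usrc, len))
                return -EFAULT;
        if (__copy_user_flushcache(pmem_dst, usrc, len))
                return -EFAULT;
        return 0;
}
#endif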

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
        unsigned long dest = (unsigned long) _dst;
        unsigned long source = (unsigned long) _src;

        /* cache copy and flush to align dest */
        if (!IS_ALIGNED(dest, 8)) {
                unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

                memcpy((void *) dest, (void *) source, len);
                clean_cache_range((void *) dest, len);
                dest += len;
                source += len;
                size -= len;
                if (!size)
                        return;
        }

        /* 4x8 movnti loop */
        while (size >= 32) {
                asm("movq    (%0), %%r8\n"
                    "movq   8(%0), %%r9\n"
                    "movq  16(%0), %%r10\n"
                    "movq  24(%0), %%r11\n"
                    "movnti  %%r8,   (%1)\n"
                    "movnti  %%r9,  8(%1)\n"
                    "movnti %%r10, 16(%1)\n"
                    "movnti %%r11, 24(%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8", "r9", "r10", "r11");
                dest += 32;
                source += 32;
                size -= 32;
        }

        /* 1x8 movnti loop */
        while (size >= 8) {
                asm("movq    (%0), %%r8\n"
                    "movnti  %%r8,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 8;
                source += 8;
                size -= 8;
        }

        /* 1x4 movnti loop */
        while (size >= 4) {
                asm("movl    (%0), %%r8d\n"
                    "movnti  %%r8d,   (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 4;
                source += 4;
                size -= 4;
        }

        /* cache copy for remaining bytes */
        if (size) {
                memcpy((void *) dest, (void *) source, size);
                clean_cache_range((void *) dest, size);
        }
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
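
/*
 * Illustrative sketch, not part of this file: memcpy_flushcache() (the
 * generic wrapper around __memcpy_flushcache()) leaves the destination
 * either written with non-temporal stores or explicitly cleaned with
 * CLWB, so the data is pushed out of the CPU caches. Callers that need
 * persistence ordering still have to issue their own fence; wmb() is
 * used below as a stand-in for whatever barrier the subsystem requires,
 * and the function name is hypothetical.
 */
#if 0
static void example_copy_to_pmem(void *pmem_dst, const void *src, size_t len)
{
        memcpy_flushcache(pmem_dst, src, len);
        /* order the stores above before any subsequent metadata update */
        wmb();
}
#endif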

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                size_t len)
{
        char *from = kmap_atomic(page);

        memcpy_flushcache(to, from + offset, len);
        kunmap_atomic(from);
}
#endif