// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/mce.h>

#ifdef CONFIG_X86_MCE
/*
 * See COPY_MC_TEST for self-test of the copy_mc_fragile()
 * implementation.
 */
static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);

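/*
 * Opt in to the cache-line-fragile, byte-granular copy. The static key
 * keeps copy_mc_to_kernel()/copy_mc_to_user() free of a conditional
 * branch for everyone who has not asked for it.
 */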
void enable_copy_mc_fragile(void)
{
	static_branch_inc(&copy_mc_fragile_key);
}
#define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))

/*
 * Similar to copy_user_handle_tail, probe for the write fault point, or
 * source exception point.
 */
__visible notrace unsigned long
copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
{
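	/*
	 * Retry one byte at a time so that the return value reflects
	 * exactly how many trailing bytes were left uncopied when the
	 * write fault or source #MC was hit.
	 */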
	for (; len; --len, to++, from++)
		if (copy_mc_fragile(to, from, 1))
			break;
	return len;
}
#else
/*
 * No point in doing careful copying, or consulting a static key when
 * there is no #MC handler in the CONFIG_X86_MCE=n case.
 */
void enable_copy_mc_fragile(void)
{
}
#define copy_mc_fragile_enabled (0)
#endif

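/*
 * Assembly implementation: a fast-string (REP MOVSB) copy with
 * exception-table based recovery, hence the X86_FEATURE_ERMS gate below.
 */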
unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);

/**
 * copy_mc_to_kernel - memory copy that handles source exceptions
 *
 * @dst: destination address
 * @src: source address
 * @len: number of bytes to copy
 *
 * Call into the 'fragile' version on systems that benefit from avoiding
 * corner case poison consumption scenarios. For example, accessing
 * poison across 2 cachelines with a single instruction. Almost all
 * other use cases can use copy_mc_enhanced_fast_string() for a fast
 * recoverable copy, or fall back to plain memcpy.
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
	if (copy_mc_fragile_enabled)
		return copy_mc_fragile(dst, src, len);
	if (static_cpu_has(X86_FEATURE_ERMS))
		return copy_mc_enhanced_fast_string(dst, src, len);
	memcpy(dst, src, len);
	return 0;
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
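/*
 * Hypothetical caller sketch (not part of this file) showing how the
 * "bytes not copied" return convention is typically consumed:
 *
 *	unsigned long rem = copy_mc_to_kernel(dst, src, len);
 *
 *	if (rem)
 *		return -EIO;	// only 'len - rem' leading bytes are valid
 */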
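/**
 * copy_mc_to_user - copy to user space with source exception handling
 *
 * @dst: destination address, in user space
 * @src: source address, a kernel address
 * @len: number of bytes to copy
 *
 * Same selection of copy routines as copy_mc_to_kernel(), with the user
 * access window opened around the low-level copy. The plain-memcpy
 * fallback becomes copy_user_generic(), which already handles faults on
 * the user destination.
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */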
unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
{
	unsigned long ret;

	if (copy_mc_fragile_enabled) {
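		/* Open the user access window (STAC on SMAP hardware). */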
		__uaccess_begin();
		ret = copy_mc_fragile(dst, src, len);
		__uaccess_end();
		return ret;
	}

	if (static_cpu_has(X86_FEATURE_ERMS)) {
		__uaccess_begin();
		ret = copy_mc_enhanced_fast_string(dst, src, len);
		__uaccess_end();
		return ret;
	}

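	/*
	 * No #MC recovery on this path; copy_user_generic() still
	 * handles ordinary faults on the user destination.
	 */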
	return copy_user_generic(dst, src, len);
}