Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards. The listing below is mm/usercopy.c, the CONFIG_HARDENED_USERCOPY implementation from this tree.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
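
/*
 * Illustration (not part of the original file): an object that starts
 * inside the current task's stack but whose last byte would land past
 * stackend (e.g. a bogus length for a buffer near the top of the stack)
 * passes the first test above yet fails the second, so the caller sees
 * BAD_STACK and __check_object_size() aborts the copy. On architectures
 * that implement arch_within_stack_frames() (x86 with CONFIG_FRAME_POINTER,
 * for instance), an in-stack object that crosses a frame boundary is
 * rejected there as well.
 */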

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
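
/*
 * Illustration (not part of the original file): a cache whose objects are
 * only ever partially copied to/from user space can be created with a
 * whitelist covering just that region. The struct and names below are
 * hypothetical:
 *
 *	struct foo {
 *		spinlock_t lock;	// never exposed to user space
 *		char data[128];		// the only user-visible part
 *	};
 *
 *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *					       0, SLAB_HWCACHE_ALIGN,
 *					       offsetof(struct foo, data),
 *					       sizeof_field(struct foo, data),
 *					       NULL);
 *
 * A copy that reaches outside foo->data then ends up in usercopy_abort()
 * above, or in usercopy_warn() when CONFIG_HARDENED_USERCOPY_FALLBACK
 * downgrades an in-object whitelist miss to a warning.
 */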

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
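
/*
 * Worked example (illustrative): overlaps(0x1000, 0x10, 0x1008, 0x2000)
 * compares [0x1000,0x1010) against [0x1008,0x2000); it is neither entirely
 * above (0x1000 >= 0x2000 is false) nor entirely below (0x1010 <= 0x1008
 * is false), so the ranges overlap and the function returns true.
 */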

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. It is usually
	 * when there is a separate linear physical memory mapping, in that
	 * __pa() is not just the reverse of __va(). This can be detected
	 * and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}
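
/*
 * Note (not in the original file): lm_alias() translates a kernel-image
 * symbol address into its linear-map alias. On arm64, the architecture of
 * these RK3588 boards, the kernel image is mapped separately from the
 * linear map, so textlow_linear differs from textlow and the second check
 * above is exercised; where the two mappings coincide, the function
 * returns early instead.
 */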

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
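
/*
 * Worked example (illustrative): with ptr = ULONG_MAX - 2 and n = 8,
 * ptr + (n - 1) wraps around to a value smaller than ptr, so the copy
 * would run past the end of the address space and is rejected. The second
 * test also rejects NULL and ZERO_SIZE_PTR, the poison pointer returned
 * by kmalloc(0).
 */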

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}
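
/*
 * Note (not in the original file): CONFIG_HARDENED_USERCOPY_PAGESPAN is
 * off by default in this kernel series, in which case check_page_span()
 * compiles to an empty function and non-slab heap objects receive no
 * further page-span checking.
 */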

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	/*
	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fallback to virt_to_page(). The following
	 * is effectively a highmem-aware virt_to_head_page().
	 */
	page = compound_head(kmap_to_page((void *)ptr));

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}
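
/*
 * Note (not in the original file): __check_heap_object() is provided by
 * the slab allocator the kernel was built with (mm/slab.c or mm/slub.c)
 * and rejects copies that fall outside the slab object or outside the
 * cache's useroffset/usersize whitelist.
 */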

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
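
/*
 * Illustration (not part of the original file): callers do not invoke
 * __check_object_size() directly. copy_to_user() and copy_from_user()
 * reach check_object_size() (include/linux/thread_info.h), which forwards
 * here only when the copy length is not a compile-time constant and
 * CONFIG_HARDENED_USERCOPY=y. A typical well-behaved caller, with
 * hypothetical names (ubuf, count, fill_report):
 *
 *	char buf[64];				// stack object
 *	size_t len = min(count, sizeof(buf));	// runtime-sized copy
 *
 *	fill_report(buf, len);
 *	if (copy_to_user(ubuf, buf, len))	// len is non-constant, so checked here
 *		return -EFAULT;
 *
 * Constant-sized copies skip this runtime path entirely.
 */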

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (strtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
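
/*
 * Example (boot-time usage, not part of the original file): booting with
 * "hardened_usercopy=off" makes strtobool() clear enable_checks here, and
 * set_hardened_usercopy() below then enables the bypass_usercopy_checks
 * static branch so that __check_object_size() returns immediately.
 */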

static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);