// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#endif

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
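
/*
 * Illustrative sketch (not part of the original file): the typical
 * kstrdup()/kfree() pairing. "struct example_dev" and example_set_label()
 * are hypothetical names used only for this example.
 */
#if 0
static int example_set_label(struct example_dev *dev, const char *label)
{
	char *copy;

	copy = kstrdup(label, GFP_KERNEL);	/* may sleep */
	if (!copy)
		return -ENOMEM;

	kfree(dev->label);			/* drop any previous copy */
	dev->label = copy;
	return 0;
}
#endif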

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise fall back
 * to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
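
/*
 * Illustrative sketch (not part of the original file): kstrndup() bounds
 * the copy when the input may be unterminated or overlong; per the note
 * above, kmemdup_nul() is preferred when the exact size is already known.
 * example_dup_token() is a hypothetical helper.
 */
#if 0
static char *example_dup_token(const char *src)
{
	/* copies at most 15 bytes and always NUL-terminates */
	return kstrndup(src, 15, GFP_KERNEL);
}
#endif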

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
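
/*
 * Illustrative sketch (not part of the original file): kmemdup() is the
 * idiomatic replacement for a kmalloc() + memcpy() pair. The structure
 * and helper below are hypothetical.
 */
#if 0
struct example_cfg {
	u32 rate;
	u32 depth;
};

static struct example_cfg *example_cfg_clone(const struct example_cfg *src)
{
	/* one call instead of kmalloc() followed by memcpy() */
	return kmemdup(src, sizeof(*src), GFP_KERNEL);
}
#endif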

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
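
/*
 * Illustrative sketch (not part of the original file): memdup_user()
 * returns an ERR_PTR() rather than NULL, so callers must check with
 * IS_ERR()/PTR_ERR(). The ioctl-style helper below is hypothetical.
 */
#if 0
static int example_ioctl_copy(void __user *uarg, size_t len)
{
	void *kbuf;

	kbuf = memdup_user(uarg, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */

	/* ... use the physically contiguous kbuf ... */

	kfree(kbuf);
	return 0;
}
#endif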

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
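
/*
 * Illustrative sketch (not part of the original file): copying a
 * user-supplied path with a bounded length. Errors surface as ERR_PTR()
 * values, including -EINVAL when the string exceeds the bound; the bound
 * of PATH_MAX here is only an illustrative choice.
 */
#if 0
static char *example_get_user_path(const char __user *upath)
{
	return strndup_user(upath, PATH_MAX);	/* kfree() when done */
}
#endif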

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
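
/*
 * Worked example (illustrative): with 4 KiB pages (PAGE_SHIFT == 12) the
 * default STACK_RND_MASK above is 0x7ff, so the random offset is at most
 * 0x7ff << 12 = 0x7ff000 bytes, i.e. just under 8 MiB of jitter subtracted
 * from (or, with CONFIG_STACK_GROWSUP, added to) the page-aligned stack top.
 */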

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32-bit? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(arch_mmap_rnd);

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0 on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
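
/*
 * Illustrative sketch (not part of the original file): a driver that pins
 * user pages charges them against RLIMIT_MEMLOCK before pinning and rolls
 * the charge back if pinning fails. example_pin() and example_do_pin()
 * are hypothetical.
 */
#if 0
static int example_pin(struct mm_struct *mm, unsigned long npages)
{
	int ret;

	/* charge first; returns -ENOMEM if RLIMIT_MEMLOCK would be exceeded */
	ret = account_locked_vm(mm, npages, true);
	if (ret)
		return ret;

	ret = example_do_pin(npages);		/* hypothetical pinning step */
	if (ret)
		account_locked_vm(mm, npages, false);	/* roll back charge */
	return ret;
}
#endif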

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	trace_android_vh_check_mmap_file(file, prot, flag, ret);
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL will never
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
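
/*
 * Illustrative sketch (not part of the original file): kvmalloc() (the
 * NUMA_NO_NODE wrapper around kvmalloc_node()) suits a table whose size
 * may exceed what kmalloc() can satisfy; the result must be freed with
 * kvfree(), never plain kfree(). The helpers below are hypothetical.
 */
#if 0
static u64 *example_alloc_table(size_t nents)
{
	/* falls back to vmalloc() for large nents; returns NULL on failure */
	return kvmalloc(array_size(nents, sizeof(u64)), GFP_KERNEL);
}

static void example_free_table(u64 *table)
{
	kvfree(table);	/* correct for both kmalloc and vmalloc origin */
}
#endif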

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
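
/*
 * Illustrative sketch (not part of the original file): releasing a buffer
 * that held key material. kvfree_sensitive() zeroes it first via
 * memzero_explicit(), which the compiler cannot optimize away.
 */
#if 0
static void example_drop_key(u8 *key, size_t keylen)
{
	kvfree_sensitive(key, keylen);	/* zero, then kvfree */
}
#endif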

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page it returns true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}
/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with a loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned long vm_commit_limit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) unsigned long allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (sysctl_overcommit_kbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) allowed = ((totalram_pages() - hugetlb_total_pages())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * sysctl_overcommit_ratio / 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) allowed += total_swap_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
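
/*
 * Worked example with illustrative numbers: with vm.overcommit_kbytes
 * unset (0), vm.overcommit_ratio = 50, 8 GiB of non-hugetlb RAM and
 * 2 GiB of swap:
 *
 *	allowed = 8 GiB * 50 / 100 + 2 GiB = 6 GiB	(kept in pages)
 *
 * A non-zero vm.overcommit_kbytes takes precedence over the ratio;
 * "kbytes >> (PAGE_SHIFT - 10)" converts KiB to pages, i.e. divides by
 * PAGE_SIZE / 1024 (4 with the common 4 KiB page size).
 */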
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
/*
 * Make sure vm_committed_as sits in its own cacheline and does not share
 * one with other variables, since it can be updated frequently by several
 * CPUs.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
 * The global memory commitment made in the system is a metric that can
 * be used to drive ballooning decisions when Linux is hosted as a guest.
 * On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine, including the guest-reported
 * memory commitment.
 *
 * The time cost of reading this value is very low on small platforms; on
 * a big platform like a 2S/36C/72T Skylake server, in the worst case
 * where vm_committed_as's spinlock is under severe contention, the time
 * cost could be about 30~40 microseconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) unsigned long vm_memory_committed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return percpu_counter_sum_positive(&vm_committed_as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) EXPORT_SYMBOL_GPL(vm_memory_committed);
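
/*
 * This sum is what /proc/meminfo reports as "Committed_AS". Unlike the
 * cheap approximation used in __vm_enough_memory() below, it folds in
 * every CPU's percpu delta, which is why the comment above quotes a
 * worst-case cost in the tens of microseconds.
 */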
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. Returns 0 if there is enough memory for the allocation to
 * succeed and -ENOMEM if there is not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * We currently support three overcommit policies, which are set via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * Additional code 2002 Jul 20 by Robert Love.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *
 * Note this is a helper function intended for LSMs that wish to reuse
 * this logic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) long allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) vm_acct_memory(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * Sometimes we want to use more memory than we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (pages > totalram_pages() + total_swap_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) allowed = vm_commit_limit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * Reserve some for root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!cap_sys_admin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /*
	 * Don't let a single process grow so big that a user can't recover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) allowed -= min_t(long, mm->total_vm / 32, reserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (percpu_counter_read_positive(&vm_committed_as) < allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) vm_unacct_memory(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
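
/*
 * Minimal caller sketch (hypothetical; the real callers are LSM hooks
 * such as cap_vm_enough_memory(), reached via
 * security_vm_enough_memory_mm()):
 *
 *	static int example_vm_enough_memory(struct mm_struct *mm, long pages)
 *	{
 *		int cap_sys_admin = capable(CAP_SYS_ADMIN) ? 1 : 0;
 *
 *		return __vm_enough_memory(mm, pages, cap_sys_admin);
 *	}
 */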
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * get_cmdline() - copy the cmdline value to a buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * @task: the task whose cmdline value to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * @buffer: the buffer to copy to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * @buflen: the length of the buffer. Larger cmdline values are truncated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * to this length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) int get_cmdline(struct task_struct *task, char *buffer, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct mm_struct *mm = get_task_mm(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (!mm->arg_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) goto out_mm; /* Shh! No looking before we're done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) spin_lock(&mm->arg_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) arg_start = mm->arg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) arg_end = mm->arg_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) env_start = mm->env_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) env_end = mm->env_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) spin_unlock(&mm->arg_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) len = arg_end - arg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (len > buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) len = buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /*
	 * If the NUL at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) len = strnlen(buffer, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (len < res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) res = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) len = env_end - env_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (len > buflen - res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) len = buflen - res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) res += access_process_vm(task, env_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) buffer+res, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) FOLL_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) res = strnlen(buffer, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) out_mm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) mmput(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
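
/*
 * Hypothetical caller sketch: since the copy is not guaranteed to be
 * NUL-terminated, terminate it explicitly before using it as a C string:
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */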
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
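/*
 * memcmp_pages - compare the contents of two pages
 *
 * Returns 0 if the pages are identical and non-zero otherwise, following
 * memcmp() semantics. This generic version maps both pages and compares
 * the full PAGE_SIZE; architectures may override this __weak definition
 * with a faster implementation.
 */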
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) int __weak memcmp_pages(struct page *page1, struct page *page2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) char *addr1, *addr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) addr1 = kmap_atomic(page1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) addr2 = kmap_atomic(page2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ret = memcmp(addr1, addr2, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) kunmap_atomic(addr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) kunmap_atomic(addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
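
/*
 * Usage sketch (hypothetical wrapper; KSM, for example, tests candidate
 * pages for content equality in essentially this way):
 *
 *	static bool example_pages_identical(struct page *a, struct page *b)
 *	{
 *		return !memcmp_pages(a, b);
 *	}
 */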