// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/crash_dump.c - Memory preserving reboot related code.
 *
 * Created by: Simon Horman <horms@verge.net.au>
 * Original code moved from kernel/crash.c
 * Original code comment copied from the i386 version of this file
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>

#include <asm/page.h>

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". The old kernel's memory is covered by the
 * current kernel's linear mapping, so the page is accessed directly
 * through __va() and no temporary mapping has to be set up.
 *
 * Return: number of bytes copied on success, -EFAULT if copying to
 * user space fails.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = __va(pfn << PAGE_SHIFT);

	if (userbuf) {
		if (copy_to_user(buf, vaddr + offset, csize))
			return -EFAULT;
	} else {
		memcpy(buf, vaddr + offset, csize);
	}

	return csize;
}
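
/*
 * Illustrative sketch only (not part of the original file, kept out of the
 * build with "#if 0"): how a caller might read an arbitrary byte range out
 * of the old kernel's memory by walking it page by page with
 * copy_oldmem_page(). The helper name example_read_oldmem() is hypothetical;
 * the in-tree vmcore reader (fs/proc/vmcore.c) works along these lines,
 * though its exact interface differs between kernel versions.
 */
#if 0
static ssize_t example_read_oldmem(char *buf, size_t count, u64 paddr,
				   int userbuf)
{
	unsigned long pfn = (unsigned long)(paddr >> PAGE_SHIFT);
	unsigned long offset = (unsigned long)(paddr & (PAGE_SIZE - 1));
	ssize_t ret, total = 0;
	size_t nr;

	while (count) {
		/* Never let a single copy cross a page boundary. */
		nr = PAGE_SIZE - offset;
		if (nr > count)
			nr = count;

		ret = copy_oldmem_page(pfn, buf, nr, offset, userbuf);
		if (ret < 0)
			return ret;

		buf += ret;
		total += ret;
		count -= ret;
		pfn++;
		offset = 0;
	}

	return total;
}
#endif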