// SPDX-License-Identifier: GPL-2.0
/*
 * crash_dump.c - Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uaccess.h>

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	/* Map the old kernel's page; ioremap() can fail, so check for NULL. */
	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	if (userbuf) {
		if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else {
		memcpy(buf, vaddr + offset, csize);
	}

	iounmap(vaddr);
	return csize;
}
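
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * helper showing how a vmcore reader could walk an arbitrary byte range of
 * old-kernel memory page by page through copy_oldmem_page(). In the real
 * kernel this job is done by read_from_oldmem() in fs/proc/vmcore.c; the
 * loop below merely demonstrates how @pfn, @offset and @csize relate.
 */
#if 0	/* example only, never compiled */
static ssize_t example_read_oldmem(char *buf, size_t count, u64 *ppos,
				   int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Never cross a page boundary in a single copy. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
#endif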