// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 */

#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
                               unsigned offset,
                               size_t len,
                               struct iov_iter *iter,
                               int vm_write)
{
        /* Do the copy for each page */
        while (len && iov_iter_count(iter)) {
                struct page *page = *pages++;
                size_t copy = PAGE_SIZE - offset;
                size_t copied;

                if (copy > len)
                        copy = len;

                if (vm_write)
                        copied = copy_page_from_iter(page, offset, copy, iter);
                else
                        copied = copy_page_to_iter(page, offset, copy, iter);

                len -= copied;
                if (copied < copy && iov_iter_count(iter))
                        return -EFAULT;
                offset = 0;
        }
        return 0;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
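/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096 and 8-byte
 * pointers): PVM_MAX_KMALLOC_PAGES / sizeof(struct page *) == 8192 / 8 ==
 * 1024 page pointers per batch, so each pin/copy/unpin iteration below can
 * cover up to 4 MiB of the remote address space.
 */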

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *	nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
                                    unsigned long len,
                                    struct iov_iter *iter,
                                    struct page **process_pages,
                                    struct mm_struct *mm,
                                    struct task_struct *task,
                                    int vm_write)
{
        unsigned long pa = addr & PAGE_MASK;
        unsigned long start_offset = addr - pa;
        unsigned long nr_pages;
        ssize_t rc = 0;
        unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
                / sizeof(struct page *);
        unsigned int flags = 0;

        /* Work out address and page range required */
        if (len == 0)
                return 0;
        nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
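        /*
         * Example (assuming 4 KiB pages): addr = 0x10ff8, len = 16 ends at
         * 0x11007, so nr_pages = 0x11 - 0x10 + 1 = 2 even though len is far
         * smaller than a page, because the range straddles a page boundary.
         */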

        if (vm_write)
                flags |= FOLL_WRITE;

        while (!rc && nr_pages && iov_iter_count(iter)) {
                int pinned_pages = min(nr_pages, max_pages_per_loop);
                int locked = 1;
                size_t bytes;

                /*
                 * Get the pages we're interested in.  We must
                 * access remotely because task/mm might not be
                 * current/current->mm
                 */
                mmap_read_lock(mm);
                pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
                                                     flags, process_pages,
                                                     NULL, &locked);
                if (locked)
                        mmap_read_unlock(mm);
                if (pinned_pages <= 0)
                        return -EFAULT;

                bytes = pinned_pages * PAGE_SIZE - start_offset;
                if (bytes > len)
                        bytes = len;

                rc = process_vm_rw_pages(process_pages,
                                         start_offset, bytes, iter,
                                         vm_write);
                len -= bytes;
                start_offset = 0;
                nr_pages -= pinned_pages;
                pa += pinned_pages * PAGE_SIZE;

                /* If vm_write is set, the pages need to be made dirty: */
                unpin_user_pages_dirty_lock(process_pages, pinned_pages,
                                            vm_write);
        }

        return rc;
}

/* Maximum number of entries for process pages array which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
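/*
 * Illustrative sizing (assuming 8-byte pointers and 4 KiB pages): the
 * on-stack array is 16 * 8 = 128 bytes and lets a request touch up to
 * 16 * 4 KiB = 64 KiB of remote memory before falling back to kmalloc.
 */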

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code.  May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
                                  const struct iovec *rvec,
                                  unsigned long riovcnt,
                                  unsigned long flags, int vm_write)
{
        struct task_struct *task;
        struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
        struct page **process_pages = pp_stack;
        struct mm_struct *mm;
        unsigned long i;
        ssize_t rc = 0;
        unsigned long nr_pages = 0;
        unsigned long nr_pages_iov;
        ssize_t iov_len;
        size_t total_len = iov_iter_count(iter);

        /*
         * Work out how many struct page pointers we're going to need
         * when eventually calling pin_user_pages_remote()
         */
        for (i = 0; i < riovcnt; i++) {
                iov_len = rvec[i].iov_len;
                if (iov_len > 0) {
                        nr_pages_iov = ((unsigned long)rvec[i].iov_base
                                        + iov_len) / PAGE_SIZE
                                       - (unsigned long)rvec[i].iov_base
                                       / PAGE_SIZE + 1;
                        nr_pages = max(nr_pages, nr_pages_iov);
                }
        }

        if (nr_pages == 0)
                return 0;

        if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
                /*
                 * For reliability don't try to kmalloc more than
                 * 2 pages worth.
                 */
                process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
                                              sizeof(struct page *) * nr_pages),
                                        GFP_KERNEL);

                if (!process_pages)
                        return -ENOMEM;
        }

        /* Get process information */
        task = find_get_task_by_vpid(pid);
        if (!task) {
                rc = -ESRCH;
                goto free_proc_pages;
        }

        mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
        if (!mm || IS_ERR(mm)) {
                rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
                /*
                 * Explicitly map EACCES to EPERM as EPERM is a more
                 * appropriate error code for process_vm_readv/writev
                 */
                if (rc == -EACCES)
                        rc = -EPERM;
                goto put_task_struct;
        }

        for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
                rc = process_vm_rw_single_vec(
                        (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
                        iter, process_pages, mm, task, vm_write);

        /* copied = space before - space after */
        total_len -= iov_iter_count(iter);

        /*
         * If we have managed to copy any data at all then
         * we return the number of bytes copied.  Otherwise
         * we return the error code.
         */
        if (total_len)
                rc = total_len;

        mmput(mm);

put_task_struct:
        put_task_struct(task);

free_proc_pages:
        if (process_pages != pp_stack)
                kfree(process_pages);
        return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code.  May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
                             const struct iovec __user *lvec,
                             unsigned long liovcnt,
                             const struct iovec __user *rvec,
                             unsigned long riovcnt,
                             unsigned long flags, int vm_write)
{
        struct iovec iovstack_l[UIO_FASTIOV];
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc;
        int dir = vm_write ? WRITE : READ;

        if (flags != 0)
                return -EINVAL;

        /* Check iovecs */
        rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
        if (rc < 0)
                return rc;
        if (!iov_iter_count(&iter))
                goto free_iov_l;
        iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r,
                                in_compat_syscall());
        if (IS_ERR(iov_r)) {
                rc = PTR_ERR(iov_r);
                goto free_iov_l;
        }
        rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
        if (iov_r != iovstack_r)
                kfree(iov_r);
free_iov_l:
        kfree(iov_l);
        return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
                const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
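
/*
 * Userspace usage sketch (illustrative only; "buf", "remote_addr" and "pid"
 * below are hypothetical).  Reading 128 bytes from another process through
 * the process_vm_readv() wrapper:
 *
 *	struct iovec local  = { .iov_base = buf,
 *				.iov_len  = 128 };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *				.iov_len  = 128 };
 *	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *
 * A short return (0 <= n < 128) is possible if only part of the remote range
 * could be accessed; n < 0 with errno set (e.g. EPERM, ESRCH, EFAULT) means
 * no data was transferred.
 */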