Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * privcmd.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Interface to privileged domain-0 commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Copyright (c) 2002-2004, K A Fraser, B Dragovic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/miscdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <asm/xen/hypervisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <asm/xen/hypercall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <xen/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <xen/privcmd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <xen/interface/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <xen/interface/memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <xen/interface/hvm/dm_op.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <xen/features.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <xen/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <xen/xen-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <xen/balloon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include "privcmd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 
MODULE_LICENSE("GPL");

/*
 * Sentinel stored in vma->vm_private_data to mark a VMA as claimed by a
 * V1 PRIVCMD_MMAP call (see privcmd_ioctl_mmap() below).
 */
#define PRIV_VMA_LOCKED ((void *)1)

/*
 * Tunables for IOCTL_PRIVCMD_DM_OP (handler not visible in this chunk):
 * cap on the number of buffers per call and on each buffer's size, per
 * the MODULE_PARM_DESC strings below.
 */
static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
/*
 * Per-open-file state.  While @domid == DOMID_INVALID the handle is
 * unrestricted; once set to a real domid (presumably by a restrict
 * ioctl outside this chunk — confirm), the ioctls below refuse to act
 * on any other domain.
 */
struct privcmd_data {
	domid_t domid;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) static int privcmd_vma_range_is_mapped(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63)                struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64)                unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65)                unsigned long nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	struct privcmd_data *data = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	struct privcmd_hypercall hypercall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	/* Disallow arbitrary hypercalls if restricted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	if (data->domid != DOMID_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	xen_preemptible_hcall_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	ret = privcmd_call(hypercall.op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 			   hypercall.arg[0], hypercall.arg[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 			   hypercall.arg[2], hypercall.arg[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 			   hypercall.arg[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	xen_preemptible_hcall_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) static void free_page_list(struct list_head *pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	struct page *p, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	list_for_each_entry_safe(p, n, pages, lru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 		__free_page(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	INIT_LIST_HEAD(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  * Given an array of items in userspace, return a list of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  * containing the data.  If copying fails, either because of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105)  */
/*
 * Copy @nelem elements of @size bytes each from the userspace array
 * @data into freshly allocated kernel pages chained onto @pagelist.
 * Elements never straddle a page boundary.  Returns 0 on success or a
 * negative errno; on failure the caller still owns, and must free, any
 * pages already added to the list (see comment above).
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;	/* byte offset of next free slot in pagedata */
	void *pagedata;
	int ret;

	/*
	 * NOTE(review): an element larger than a page yields "success"
	 * with an empty list; all callers in this file pass small
	 * fixed-size structs, so this path is never taken in practice.
	 */
	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;	/* force a page allocation on first pass */
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		/* Start a new page when the next element would not fit. */
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148)  * Call function "fn" on each element of the array fragmented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149)  * over a list of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) static int traverse_pages(unsigned nelem, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 			  struct list_head *pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 			  int (*fn)(void *data, void *state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 			  void *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	void *pagedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	unsigned pageidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	BUG_ON(size > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	pageidx = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	pagedata = NULL;	/* hush, gcc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	while (nelem--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		if (pageidx > PAGE_SIZE-size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 			struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 			pos = pos->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 			page = list_entry(pos, struct page, lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 			pagedata = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 			pageidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		ret = (*fn)(pagedata + pageidx, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 		pageidx += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184)  * Similar to traverse_pages, but use each page as a "block" of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  * data to be processed as one unit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) static int traverse_pages_block(unsigned nelem, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 				struct list_head *pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 				int (*fn)(void *data, int nr, void *state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 				void *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	void *pagedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	BUG_ON(size > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	while (nelem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 		int nr = (PAGE_SIZE/size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 		struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 		if (nr > nelem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 			nr = nelem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 		pos = pos->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 		page = list_entry(pos, struct page, lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		pagedata = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 		ret = (*fn)(pagedata, nr, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		nelem -= nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 
/* Walker state for mmap_gfn_range(). */
struct mmap_gfn_state {
	unsigned long va;		/* next VA a chunk must start at */
	struct vm_area_struct *vma;	/* target VMA for all chunks */
	domid_t domain;			/* foreign domain owning the frames */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
/*
 * traverse_pages() callback: map one privcmd_mmap_entry worth of
 * foreign frames into the VMA recorded in @state.  Chunks must be
 * contiguous in VA space, so @st->va tracks where the next one starts.
 */
static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	/* (-st->va is the distance from st->va to the top of the space;
	 * the LONG_MAX check keeps the shift itself from overflowing.) */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	/* Advance so the next chunk must follow on immediately. */
	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
/*
 * IOCTL_PRIVCMD_MMAP (V1): map an array of privcmd_mmap_entry chunks of
 * a foreign domain's frames into the caller's address space.  All
 * chunks must land contiguously in one VMA, starting at its base.
 * Returns 0 on success or a negative errno.
 */
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	/* Snapshot the userspace entry array into kernel pages. */
	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		/* The first entry selects the VMA: it must start exactly
		 * at vm_start and must not already be claimed. */
		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	/* Map every chunk; mmap_gfn_range enforces contiguity. */
	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 
/* Shared state for the mapping and error-reporting passes of
 * privcmd_ioctl_mmap_batch(). */
struct mmap_batch_state {
	domid_t domain;			/* foreign domain owning the frames */
	unsigned long va;		/* next VA to map (first pass) */
	struct vm_area_struct *vma;	/* target VMA */
	int index;			/* CPU-page index into vm_private_data */
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;			/* ioctl ABI version: 1 or 2 */

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) /* auto translated dom0 note: if domU being created is PV, then gfn is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332)  * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333)  */
/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
/*
 * traverse_pages_block() callback, first pass: map @nr frames from the
 * gfn array @data.  The err_ptr argument passed to the remap call
 * aliases that same array ((int *)gfnp), so on return it holds the
 * per-frame status codes that mmap_return_errors() reports later.
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	/* Auto-translated guests map onto the pre-allocated page array. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	/* nr counts Xen-sized frames (XEN_PAGE_SIZE); index counts CPU
	 * pages, hence the division by XEN_PFN_PER_PAGE. */
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) static int mmap_return_error(int err, struct mmap_batch_state *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	if (st->version == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 			xen_pfn_t gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 			ret = get_user(gfn, st->user_gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 			 * V1 encodes the error codes in the 32bit top
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 			 * nibble of the gfn (with its known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 			 * limitations vis-a-vis 64 bit callers).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 			gfn |= (err == -ENOENT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 				PRIVCMD_MMAPBATCH_PAGED_ERROR :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 				PRIVCMD_MMAPBATCH_MFN_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 			return __put_user(gfn, st->user_gfn++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 			st->user_gfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	} else { /* st->version == 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 			return __put_user(err, st->user_err++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 			st->user_err++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
/*
 * traverse_pages_block() callback, second pass: copy each per-frame
 * error code in @data (an int array of length @nr) out to userspace via
 * mmap_return_error(), stopping on the first copy failure.
 */
static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	const int *errs = data;
	int idx;

	for (idx = 0; idx < nr; idx++) {
		int rc = mmap_return_error(errs[idx], st);

		if (rc < 0)
			return rc;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) /* Allocate pfns that are then mapped with gfns from foreign domid. Update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415)  * the vma with the page info to use later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416)  * Returns: 0 if success, otherwise -errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	if (pages == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	rc = xen_alloc_unpopulated_pages(numpgs, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 			numpgs, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	BUG_ON(vma->vm_private_data != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	vma->vm_private_data = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) static const struct vm_operations_struct privcmd_vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) static long privcmd_ioctl_mmap_batch(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	struct file *file, void __user *udata, int version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	struct privcmd_data *data = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	struct privcmd_mmapbatch_v2 m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	unsigned long nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	LIST_HEAD(pagelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	struct mmap_batch_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	switch (version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		/* Returns per-frame error in m.arr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		m.err = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		/* Returns per-frame error code in m.err. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	/* If restriction is in place, check the domid matches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	if (data->domid != DOMID_INVALID && data->domid != m.dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	if (list_empty(&pagelist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	if (version == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		/* Zero error array now to only copy back actual errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		if (clear_user(m.err, sizeof(int) * m.num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	mmap_write_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	vma = find_vma(mm, m.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	if (!vma ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	    vma->vm_ops != &privcmd_vm_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	 * Caller must either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	 * Map the whole VMA range, which will also allocate all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	 * pages required for the auto_translated_physmap case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	 * Or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	 * Map unmapped holes left from a previous map attempt (e.g.,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	 * because those foreign frames were previously paged out).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	if (vma->vm_private_data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		if (m.addr != vma->vm_start ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		if (xen_feature(XENFEAT_auto_translated_physmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 			ret = alloc_empty_pages(vma, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 			vma->vm_private_data = PRIV_VMA_LOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		if (m.addr < vma->vm_start ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	state.domain        = m.dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	state.vma           = vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	state.va            = m.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	state.index         = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	state.global_error  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	state.version       = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	/* mmap_batch_fn guarantees ret == 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 				    &pagelist, mmap_batch_fn, &state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	if (state.global_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		/* Write back errors in second pass. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		state.user_gfn = (xen_pfn_t *)m.arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		state.user_err = m.err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 					   &pagelist, mmap_return_errors, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	/* If we have not had any EFAULT-like global errors then set the global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	 * error to -ENOENT if necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	if ((ret == 0) && (state.global_error == -ENOENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	free_page_list(&pagelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static int lock_pages(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	struct privcmd_dm_op_buf kbufs[], unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		unsigned int requested;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		int page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		requested = DIV_ROUND_UP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 			PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		if (requested > nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 			return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		page_count = pin_user_pages_fast(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 			(unsigned long) kbufs[i].uptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 			requested, FOLL_WRITE, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		if (page_count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 			return page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		*pinned += page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		nr_pages -= page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		pages += page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) static void unlock_pages(struct page *pages[], unsigned int nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	unpin_user_pages_dirty_lock(pages, nr_pages, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	struct privcmd_data *data = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	struct privcmd_dm_op kdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	struct privcmd_dm_op_buf *kbufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	unsigned int nr_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	struct page **pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	struct xen_dm_op_buf *xbufs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	unsigned int pinned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if (copy_from_user(&kdata, udata, sizeof(kdata)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	/* If restriction is in place, check the domid matches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	if (kdata.num == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	if (kdata.num > privcmd_dm_op_max_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	if (!kbufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	if (copy_from_user(kbufs, kdata.ubufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			   sizeof(*kbufs) * kdata.num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	for (i = 0; i < kdata.num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 			rc = -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		if (!access_ok(kbufs[i].uptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			       kbufs[i].size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		nr_pages += DIV_ROUND_UP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	if (!pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	if (!xbufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		nr_pages = pinned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	for (i = 0; i < kdata.num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		xbufs[i].size = kbufs[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	xen_preemptible_hcall_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	xen_preemptible_hcall_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	unlock_pages(pages, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	kfree(xbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	kfree(kbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	struct privcmd_data *data = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	domid_t dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	if (copy_from_user(&dom, udata, sizeof(dom)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	/* Set restriction to the specified domain, or check it matches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	if (data->domid == DOMID_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		data->domid = dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	else if (data->domid != dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) static long privcmd_ioctl_mmap_resource(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 				struct privcmd_mmap_resource __user *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	struct privcmd_data *data = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct privcmd_mmap_resource kdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	xen_pfn_t *pfns = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	struct xen_mem_acquire_resource xdata = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	if (copy_from_user(&kdata, udata, sizeof(kdata)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	/* If restriction is in place, check the domid matches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	/* Both fields must be set or unset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	if (!!kdata.addr != !!kdata.num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	xdata.domid = kdata.dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	xdata.type = kdata.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	xdata.id = kdata.id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (!kdata.addr && !kdata.num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		/* Query the size of the resource. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		return __put_user(xdata.nr_frames, &udata->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	mmap_write_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	vma = find_vma(mm, kdata.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (!pfns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	    xen_feature(XENFEAT_auto_translated_physmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		rc = alloc_empty_pages(vma, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		pages = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		for (i = 0; i < kdata.num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			xen_pfn_t pfn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		vma->vm_private_data = PRIV_VMA_LOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	xdata.frame = kdata.idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	xdata.nr_frames = kdata.num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	set_xen_guest_handle(xdata.frame_list, pfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	xen_preemptible_hcall_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	xen_preemptible_hcall_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	    xen_feature(XENFEAT_auto_translated_physmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		unsigned int domid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			DOMID_SELF : kdata.dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		int num, *errs = (int *)pfns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		num = xen_remap_domain_mfn_array(vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 						 kdata.addr & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 						 pfns, kdata.num, errs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 						 vma->vm_page_prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 						 domid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 						 vma->vm_private_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		if (num < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			rc = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		else if (num != kdata.num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 				rc = errs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 				if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	kfree(pfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static long privcmd_ioctl(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			  unsigned int cmd, unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	int ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	void __user *udata = (void __user *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	case IOCTL_PRIVCMD_HYPERCALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		ret = privcmd_ioctl_hypercall(file, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	case IOCTL_PRIVCMD_MMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		ret = privcmd_ioctl_mmap(file, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	case IOCTL_PRIVCMD_MMAPBATCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	case IOCTL_PRIVCMD_MMAPBATCH_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	case IOCTL_PRIVCMD_DM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		ret = privcmd_ioctl_dm_op(file, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	case IOCTL_PRIVCMD_RESTRICT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		ret = privcmd_ioctl_restrict(file, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	case IOCTL_PRIVCMD_MMAP_RESOURCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		ret = privcmd_ioctl_mmap_resource(file, udata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static int privcmd_open(struct inode *ino, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* DOMID_INVALID implies no restriction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	data->domid = DOMID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	file->private_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) static int privcmd_release(struct inode *ino, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	struct privcmd_data *data = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) static void privcmd_close(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	struct page **pages = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	int numpgs = vma_pages(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		xen_free_unpopulated_pages(numpgs, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			numpgs, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) static vm_fault_t privcmd_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	       vmf->pgoff, (void *)vmf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
/* VMA callbacks for mappings created via privcmd_mmap(). */
static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	 * how to recreate these mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			 VM_DONTEXPAND | VM_DONTDUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	vma->vm_ops = &privcmd_vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	vma->vm_private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  * For MMAPBATCH*. This allows asserting the singleshot mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  * on a per pfn/pte basis. Mapping calls that fail with ENOENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  * can be then retried until success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	return pte_none(*pte) ? 0 : -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) static int privcmd_vma_range_is_mapped(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	           struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	           unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	           unsigned long nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				   is_mapped_fn, NULL) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
/*
 * File operations for the privcmd device.  Exported so other Xen code
 * (e.g. xenfs) can expose the same interface.
 */
const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
/* Misc device node /dev/xen/privcmd with a dynamically assigned minor. */
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) static int __init privcmd_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	err = misc_register(&privcmd_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		pr_err("Could not register Xen privcmd device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	err = misc_register(&xen_privcmdbuf_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		pr_err("Could not register Xen hypercall-buf device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		misc_deregister(&privcmd_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
/* Module teardown: remove both misc devices registered in privcmd_init(). */
static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);