Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"


static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
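
/*
 * Worked example (illustrative; not part of the original file): with
 * PAGE_SIZE = 4 KiB and SHMLBA = 16 KiB, an SHM map gets
 * bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1 = ilog2(4) + 1 = 3.  The low
 * three bits of the kernel address's page number are folded into 'add'
 * and 'shift' is 3, so every candidate key that
 * drm_ht_just_insert_please() tries keeps those three low bits intact.
 * The user token handed back to userspace therefore lands on the same
 * SHMLBA colour as the kernel virtual address, which is what keeps the
 * later mmap() free of cache aliasing.
 */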

/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* Page-align _DRM_SHM maps. They are allocated here, so there is no
	 * security hole created by that, and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * That's not always the case, as AGP can be under the control
		 * of user space (e.g. the i810 driver).  In that case the loop
		 * below is skipped, so we double-check that dev->agp->memory
		 * is actually non-empty, as well as the map falling outside
		 * every bound region, before returning -EPERM.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first.
		 */
		map->handle = dma_alloc_coherent(&dev->pdev->dev,
						 map->size,
						 &map->offset,
						 GFP_KERNEL);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -ENOMEM;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
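
/*
 * Usage sketch (illustrative only; not part of the original file): a
 * legacy driver would typically set up a register map at load time and
 * drop it again on unload, along these lines ('pdev' being the driver's
 * struct pci_dev, and BAR 0 just an example):
 *
 *	struct drm_local_map *map;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, _DRM_WRITE_COMBINING, &map);
 *	...
 *	drm_legacy_rmmap(dev, map);
 */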

struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;

	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);

/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;
	/* avoid a warning on 64-bit; this cast isn't very nice, but the
	 * ioctl ABI was fixed long ago, so it's too late to change now */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 *  it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
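
/*
 * Userspace view (illustrative; not part of the original file): libdrm
 * reaches this ioctl through drmAddMap(), or it can be issued directly,
 * roughly:
 *
 *	struct drm_map req = {
 *		.offset = bar_phys_addr,	// hypothetical address
 *		.size	= 0x1000,
 *		.type	= _DRM_REGISTERS,
 *	};
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &req);
 *
 * On success req.handle carries the user token that is later passed as
 * the mmap() offset; note that the _DRM_REGISTERS case here requires
 * CAP_SYS_ADMIN per the check above.
 */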

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping at the requested index (userspace passes it
 * in the offset field) and copies its information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's still in use, and frees any associated resources
 * (such as MTRRs) if it isn't.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		fallthrough;
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dma_free_coherent(&dev->pdev->dev,
				  map->size,
				  map->handle,
				  map->offset);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_legacy_rmmaps(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i])
				drm_pci_free(dev, entry->seglist[i]);
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++)
			kfree(entry->buflist[i].dev_private);
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
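	/*
	 * Example of the arithmetic above (illustrative; not part of the
	 * original file): a request for 64 KiB buffers gives
	 * order = order_base_2(65536) = 16 and size = 1 << 16 = 64 KiB;
	 * with 4 KiB pages, page_order = 16 - 12 = 4, so
	 * total = PAGE_SIZE << 4 = 64 KiB per allocation.
	 */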

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		buf->waiting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		buf->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		buf->file_priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		buf->dev_priv_size = dev->driver->dev_priv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		if (!buf->dev_private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			/* Set count correctly so we free the proper amount. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			entry->buf_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			drm_cleanup_buf_error(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		offset += alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		entry->buf_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		byte_count += PAGE_SIZE << page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	DRM_DEBUG("byte_count: %d\n", byte_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	temp_buflist = krealloc(dma->buflist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 				(dma->buf_count + entry->buf_count) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 				sizeof(*dma->buflist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (!temp_buflist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		/* Free the entry because it isn't valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		drm_cleanup_buf_error(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	dma->buflist = temp_buflist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	for (i = 0; i < entry->buf_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	dma->buf_count += entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	dma->seg_count += entry->seg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	dma->page_count += byte_count >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	dma->byte_count += byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	request->count = entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	request->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	dma->flags = _DRM_DMA_USE_AGP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) EXPORT_SYMBOL(drm_legacy_addbufs_agp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) #endif /* CONFIG_AGP */
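
/*
 * A minimal user-space sketch of driving the AGP path above through the
 * legacy DRM_IOCTL_ADD_BUFS ioctl from the <drm/drm.h> UAPI. Hedged
 * illustration: the fd, count, size and agp_start values are hypothetical,
 * and error handling is elided.
 *
 *     #include <sys/ioctl.h>
 *     #include <drm/drm.h>
 *
 *     int add_agp_bufs(int fd)
 *     {
 *             struct drm_buf_desc desc = {
 *                     .count     = 32,     // buffers requested
 *                     .size      = 65536,  // bytes each, rounded to 1 << order
 *                     .flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *                     .agp_start = 0,      // offset into the bound AGP region
 *             };
 *
 *             // On success the kernel writes back the granted count/size.
 *             return ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *     }
 */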
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) int drm_legacy_addbufs_pci(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			   struct drm_buf_desc *request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct drm_device_dma *dma = dev->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	int total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	int page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	struct drm_buf_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	drm_dma_handle_t *dmah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct drm_buf *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	int alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	int byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	int page_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	unsigned long *temp_pagelist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	struct drm_buf **temp_buflist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	count = request->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	order = order_base_2(request->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	size = 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		  request->count, request->size, size, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	alignment = (request->flags & _DRM_PAGE_ALIGN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	    ? PAGE_ALIGN(size) : size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	total = PAGE_SIZE << page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	spin_lock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (dev->buf_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	atomic_inc(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	mutex_lock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	entry = &dma->bufs[order];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	if (entry->buf_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		return -ENOMEM;	/* May only call once for each order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (count < 0 || count > 4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (!entry->buflist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (!entry->seglist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		kfree(entry->buflist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	/* Keep the original pagelist until we know all the allocations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	 * have succeeded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 				      sizeof(*dma->pagelist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (!temp_pagelist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		kfree(entry->buflist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		kfree(entry->seglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	memcpy(temp_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	DRM_DEBUG("pagelist: %d entries\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		  dma->page_count + (count << page_order));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	entry->buf_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	entry->page_order = page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	page_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	while (entry->buf_count < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		if (!dmah) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			/* Set count correctly so we free the proper amount. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			entry->buf_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			entry->seg_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			drm_cleanup_buf_error(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			kfree(temp_pagelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		entry->seglist[entry->seg_count++] = dmah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		for (i = 0; i < (1 << page_order); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			DRM_DEBUG("page %d @ 0x%08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 				  dma->page_count + page_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			temp_pagelist[dma->page_count + page_count++]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		for (offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		     offset + size <= total && entry->buf_count < count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		     offset += alignment, ++entry->buf_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			buf = &entry->buflist[entry->buf_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			buf->idx = dma->buf_count + entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			buf->total = alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			buf->order = order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			buf->used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			buf->offset = (dma->byte_count + byte_count + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			buf->address = (void *)(dmah->vaddr + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			buf->bus_address = dmah->busaddr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			buf->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			buf->waiting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			buf->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			buf->file_priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			buf->dev_priv_size = dev->driver->dev_priv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			buf->dev_private = kzalloc(buf->dev_priv_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 						GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			if (!buf->dev_private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				/* Set count correctly so we free the proper amount. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 				entry->buf_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				entry->seg_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				drm_cleanup_buf_error(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				kfree(temp_pagelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 				atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			DRM_DEBUG("buffer %d @ %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 				  entry->buf_count, buf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		byte_count += PAGE_SIZE << page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	temp_buflist = krealloc(dma->buflist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 				(dma->buf_count + entry->buf_count) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 				sizeof(*dma->buflist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (!temp_buflist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		/* Free the entry because it isn't valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		drm_cleanup_buf_error(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		kfree(temp_pagelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	dma->buflist = temp_buflist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	for (i = 0; i < entry->buf_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	/* No allocations failed, so now we can replace the original pagelist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	 * with the new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (dma->page_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		kfree(dma->pagelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	dma->pagelist = temp_pagelist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	dma->buf_count += entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	dma->seg_count += entry->seg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	dma->page_count += entry->seg_count << page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	request->count = entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	request->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (request->flags & _DRM_PCI_BUFFER_RO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		dma->flags = _DRM_DMA_USE_PCI_RO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) EXPORT_SYMBOL(drm_legacy_addbufs_pci);
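
/*
 * Worked example of the sizing arithmetic above (assuming 4 KiB pages, so
 * PAGE_SHIFT == 12): a request with size = 2048 and no _DRM_PAGE_ALIGN gives
 * order = order_base_2(2048) = 11, size = alignment = 2048, page_order = 0
 * and total = PAGE_SIZE = 4096. Each drm_pci_alloc() segment then holds
 * total / alignment = 2 buffers, which is exactly how many the inner
 * "offset += alignment" loop packs before byte_count advances by
 * PAGE_SIZE << page_order.
 */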
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static int drm_legacy_addbufs_sg(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 				 struct drm_buf_desc *request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct drm_device_dma *dma = dev->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct drm_buf_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	struct drm_buf *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	unsigned long agp_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	int alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	int page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	int total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	int byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct drm_buf **temp_buflist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	if (!drm_core_check_feature(dev, DRIVER_SG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	count = request->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	order = order_base_2(request->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	size = 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	alignment = (request->flags & _DRM_PAGE_ALIGN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	    ? PAGE_ALIGN(size) : size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	total = PAGE_SIZE << page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	agp_offset = request->agp_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	DRM_DEBUG("count:      %d\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	DRM_DEBUG("order:      %d\n", order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	DRM_DEBUG("size:       %d\n", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	DRM_DEBUG("alignment:  %d\n", alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	DRM_DEBUG("page_order: %d\n", page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	DRM_DEBUG("total:      %d\n", total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	spin_lock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (dev->buf_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	atomic_inc(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	mutex_lock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	entry = &dma->bufs[order];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	if (entry->buf_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return -ENOMEM;	/* May only call once for each order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	if (count < 0 || count > 4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	if (!entry->buflist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	entry->buf_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	entry->page_order = page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	while (entry->buf_count < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		buf = &entry->buflist[entry->buf_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		buf->idx = dma->buf_count + entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		buf->total = alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		buf->order = order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		buf->used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		buf->offset = (dma->byte_count + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		buf->bus_address = agp_offset + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		buf->address = (void *)(agp_offset + offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 					+ (unsigned long)dev->sg->virtual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		buf->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		buf->waiting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		buf->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		buf->file_priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		buf->dev_priv_size = dev->driver->dev_priv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		if (!buf->dev_private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			/* Set count correctly so we free the proper amount. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			entry->buf_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			drm_cleanup_buf_error(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		offset += alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		entry->buf_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		byte_count += PAGE_SIZE << page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	DRM_DEBUG("byte_count: %d\n", byte_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	temp_buflist = krealloc(dma->buflist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 				(dma->buf_count + entry->buf_count) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 				sizeof(*dma->buflist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	if (!temp_buflist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		/* Free the entry because it isn't valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		drm_cleanup_buf_error(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	dma->buflist = temp_buflist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	for (i = 0; i < entry->buf_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	dma->buf_count += entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	dma->seg_count += entry->seg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	dma->page_count += byte_count >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	dma->byte_count += byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	mutex_unlock(&dev->struct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	request->count = entry->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	request->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	dma->flags = _DRM_DMA_USE_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	atomic_dec(&dev->buf_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
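
/*
 * Note the addressing difference from the AGP path: here request->agp_start
 * is reused as a plain offset into the scatter-gather area, and CPU-visible
 * addresses come from dev->sg->virtual rather than dev->agp->base. A
 * hypothetical caller (hedged sketch, error handling elided) would request
 * such buffers with:
 *
 *     struct drm_buf_desc desc = {
 *             .count     = 16,
 *             .size      = 4096,
 *             .flags     = _DRM_SG_BUFFER,
 *             .agp_start = 0,  // offset into the SG region, not a bus address
 *     };
 *     ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 */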
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)  * Add buffers for DMA transfers (ioctl).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)  * \param inode device inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)  * \param file_priv DRM file private.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)  * \param cmd command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)  * \param arg pointer to a struct drm_buf_desc request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * According to the memory type specified in drm_buf_desc::flags and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * build options, it dispatches the call either to addbufs_agp(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  * PCI memory respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) int drm_legacy_addbufs(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		       struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	struct drm_buf_desc *request = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) #if IS_ENABLED(CONFIG_AGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (request->flags & _DRM_AGP_BUFFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		ret = drm_legacy_addbufs_agp(dev, request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (request->flags & _DRM_SG_BUFFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		ret = drm_legacy_addbufs_sg(dev, request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	else if (request->flags & _DRM_FB_BUFFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		ret = drm_legacy_addbufs_pci(dev, request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
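
/*
 * In user space this dispatcher is typically reached through libdrm's
 * drmAddBufs(fd, count, size, flags, agp_offset) wrapper (declared in
 * xf86drm.h) rather than a raw ioctl(fd, DRM_IOCTL_ADD_BUFS, ...); the
 * flags argument selects the same _DRM_AGP_BUFFER / _DRM_SG_BUFFER / PCI
 * branches handled above.
 */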
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  * Get information about the buffer mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  * This was originally meant for debugging purposes, or for use by a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  * sophisticated client library to determine how best to use the available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  * buffers (e.g., large buffers can be used for image transfer).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  * \param inode device inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * \param file_priv DRM file private.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * \param cmd command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * \param arg pointer to a drm_buf_info structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  * Increments drm_device::buf_use while holding the drm_device::buf_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  * lock, preventing further buffer allocation after this call. Information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  * about each requested buffer is then copied into user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) int __drm_legacy_infobufs(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			void *data, int *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			int (*f)(void *, int, struct drm_buf_entry *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	struct drm_device_dma *dma = dev->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	spin_lock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	if (atomic_read(&dev->buf_alloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	++dev->buf_use;		/* Can't allocate more after this call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		if (dma->bufs[i].buf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	DRM_DEBUG("count = %d\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (*p >= count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			struct drm_buf_entry *from = &dma->bufs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			if (from->buf_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 				if (f(data, count, from) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 					return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				DRM_DEBUG("%d %d %d %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 					  i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 					  dma->bufs[i].buf_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 					  dma->bufs[i].buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 					  dma->bufs[i].low_mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 					  dma->bufs[i].high_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 				++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	*p = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	struct drm_buf_info *request = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	struct drm_buf_desc __user *to = &request->list[count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	struct drm_buf_desc v = {.count = from->buf_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 				 .size = from->buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 				 .low_mark = from->low_mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 				 .high_mark = from->high_mark};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) int drm_legacy_infobufs(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	struct drm_buf_info *request = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
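
/*
 * The *p >= count contract above implies the usual two-pass pattern for
 * DRM_IOCTL_INFO_BUFS in user space: query the bucket count first, then
 * fetch the descriptors. Hedged, hypothetical sketch with error handling
 * elided:
 *
 *     struct drm_buf_info info = { .count = 0, .list = NULL };
 *     ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);  // first pass: learn count
 *
 *     struct drm_buf_desc *list = calloc(info.count, sizeof(*list));
 *     info.list = list;
 *     ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);  // second pass: fill descriptors
 */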
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  * Specifies a low and high water mark for buffer allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  * \param inode device inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  * \param file_priv DRM file private.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * \param cmd command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * \param arg a pointer to a drm_buf_desc structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  * Verifies that the size order is within the admissible range and updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * the respective drm_device_dma::bufs entry's low and high water marks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * \note This ioctl is deprecated and almost never used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) int drm_legacy_markbufs(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	struct drm_device_dma *dma = dev->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	struct drm_buf_desc *request = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	struct drm_buf_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	DRM_DEBUG("%d, %d, %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		  request->size, request->low_mark, request->high_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	order = order_base_2(request->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	entry = &dma->bufs[order];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	entry->low_mark = request->low_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	entry->high_mark = request->high_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
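
/*
 * A hypothetical caller of this deprecated ioctl reuses struct drm_buf_desc,
 * of which only size and the two marks are read here (hedged sketch,
 * example values chosen to fit within entry->buf_count):
 *
 *     struct drm_buf_desc d = { .size = 4096, .low_mark = 4, .high_mark = 16 };
 *     ioctl(fd, DRM_IOCTL_MARK_BUFS, &d);
 */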
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)  * Unreserve the buffers in the list previously reserved using drmDMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)  * \param inode device inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  * \param file_priv DRM file private.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * \param cmd command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * \param arg pointer to a drm_buf_free structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)  * Calls drm_legacy_free_buffer() for each used buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)  * This function is primarily used for debugging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) int drm_legacy_freebufs(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 			struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	struct drm_device_dma *dma = dev->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	struct drm_buf_free *request = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	struct drm_buf *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	DRM_DEBUG("%d\n", request->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	for (i = 0; i < request->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		if (idx < 0 || idx >= dma->buf_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			DRM_ERROR("Index %d (of %d max)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 				  idx, dma->buf_count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		idx = array_index_nospec(idx, dma->buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		buf = dma->buflist[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		if (buf->file_priv != file_priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			DRM_ERROR("Process %d freeing buffer not owned\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 				  task_pid_nr(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		drm_legacy_free_buffer(dev, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
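
/*
 * DRM_IOCTL_FREE_BUFS takes an array of buffer indices (the buf->idx values
 * assigned at allocation time), so a hypothetical caller releasing two of
 * its own buffers would do (hedged sketch, error handling elided):
 *
 *     int idx[2] = { 0, 1 };
 *     struct drm_buf_free req = { .count = 2, .list = idx };
 *     ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */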
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  * Maps all of the DMA buffers into client-virtual space (ioctl).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  * \param inode device inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  * \param file_priv DRM file private.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)  * \param cmd command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  * \param arg pointer to a drm_buf_map structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)  * \return zero on success or a negative number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)  * drm_mmap_dma().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			 void __user **v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			 int (*f)(void *, int, unsigned long,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 				 struct drm_buf *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 				 struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct drm_device_dma *dma = dev->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	int retcode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	unsigned long virtual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	spin_lock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	if (atomic_read(&dev->buf_alloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	dev->buf_use++;		/* Can't allocate more after this call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	spin_unlock(&dev->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (*p >= dma->buf_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		    || (drm_core_check_feature(dev, DRIVER_SG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			&& (dma->flags & _DRM_DMA_USE_SG))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			struct drm_local_map *map = dev->agp_buffer_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			unsigned long token = dev->agp_buffer_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			if (!map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 				retcode = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			virtual = vm_mmap(file_priv->filp, 0, map->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 					  PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 					  MAP_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 					  token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 					  PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 					  MAP_SHARED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		if (virtual > -1024UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			/* Real error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			retcode = (signed long)virtual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		*v = (void __user *)virtual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		for (i = 0; i < dma->buf_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			if (f(data, i, virtual, dma->buflist[i]) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 				retcode = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	*p = dma->buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	return retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
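/*
 * The "virtual > -1024UL" test above is the errno-in-address idiom:
 * vm_mmap() returns either a userspace address or a negated errno
 * folded into the same unsigned long, so the topmost values of the
 * address range are reserved for error codes.  A minimal sketch of the
 * same check; the modern spelling is IS_ERR_VALUE(), which reserves
 * MAX_ERRNO = 4095 values rather than 1024.
 */
static inline bool example_vm_mmap_failed(unsigned long addr)
{
	/* Errors -1..-1023 occupy (ULONG_MAX - 1022)..ULONG_MAX. */
	return addr > -1024UL;
}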
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static int map_one_buf(void *data, int idx, unsigned long virtual,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		       struct drm_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	struct drm_buf_map *request = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	unsigned long address = virtual + buf->offset;	/* user VA of this buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	if (copy_to_user(&request->list[idx].idx, &buf->idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			 sizeof(request->list[0].idx)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (copy_to_user(&request->list[idx].total, &buf->total,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			 sizeof(request->list[0].total)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	if (clear_user(&request->list[idx].used, sizeof(int)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (copy_to_user(&request->list[idx].address, &address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			 sizeof(address)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
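/*
 * map_one_buf() above fills one userspace record per buffer, field by
 * field, and zeroes "used" instead of copying kernel state out.  For
 * reference, the record it targets is struct drm_buf_pub from
 * include/uapi/drm/drm.h:
 *
 *	struct drm_buf_pub {
 *		int idx;		// Index into the master buffer list
 *		int total;		// Buffer size
 *		int used;		// Amount of buffer in use (for DMA)
 *		void __user *address;	// Address of buffer
 *	};
 *
 * Copying per field keeps the ioctl ABI stable even if the kernel-side
 * struct drm_buf layout changes.
 */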
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) int drm_legacy_mapbufs(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		       struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	struct drm_buf_map *request = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	return __drm_legacy_mapbufs(dev, data, &request->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 				    &request->virtual, map_one_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 				    file_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
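/*
 * Because __drm_legacy_mapbufs() always writes the true buffer count
 * back at "done:", userspace drives this ioctl in two passes: first
 * with count == 0 to learn buf_count, then with a list large enough to
 * hold it.  A hedged userspace sketch of that protocol (libdrm's
 * drmMapBufs() follows the same shape; DRM-master setup, EINTR
 * restarts, and cleanup of the mapping are omitted):
 */
#include <string.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static struct drm_buf_pub *example_map_bufs(int fd, struct drm_buf_map *bufs)
{
	memset(bufs, 0, sizeof(*bufs));

	/* Pass 1: count == 0 skips the mmap path; the kernel only
	 * reports how many DMA buffers exist. */
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, bufs) || !bufs->count)
		return NULL;

	bufs->list = calloc(bufs->count, sizeof(*bufs->list));
	if (!bufs->list)
		return NULL;

	/* Pass 2: count now covers buf_count, so the kernel vm_mmap()s
	 * the buffer area here and fills one drm_buf_pub per buffer. */
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, bufs)) {
		free(bufs->list);
		return NULL;
	}
	return bufs->list;
}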
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			 struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (dev->driver->dma_ioctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		return dev->driver->dma_ioctl(dev, data, file_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
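/*
 * drm_legacy_dma_ioctl() is only a dispatcher: DRM_IOCTL_DMA has no
 * generic implementation, so each legacy driver supplies its own hook.
 * A hedged sketch of the wiring with a hypothetical driver; a real
 * handler interprets the struct drm_dma request (send_count buffers to
 * submit, request_count free buffers to hand back):
 */
static int example_dma_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_dma *d = data;

	/* ... submit d->send_count buffers, then grant free buffers
	 * back through d->request_indices / d->request_sizes ... */
	d->granted_count = 0;
	return 0;
}

static struct drm_driver example_driver = {
	.driver_features = DRIVER_LEGACY | DRIVER_HAVE_DMA,
	.dma_ioctl	 = example_dma_ioctl,
	/* ... */
};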
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	struct drm_map_list *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	list_for_each_entry(entry, &dev->maplist, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		if (entry->map && entry->map->type == _DRM_SHM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 			return entry->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) EXPORT_SYMBOL(drm_legacy_getsarea);
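/*
 * The SAREA is the one _DRM_SHM mapping created with _DRM_CONTAINS_LOCK
 * set: a shared area holding the hardware lock at its start, with
 * driver/DDX state behind it.  A hedged sketch of a typical consumer of
 * the export above; the helper name is illustrative, and the
 * sizeof(struct drm_sarea) offset (from <drm/drm_sarea.h>) is only the
 * conventional default some legacy drivers use for their private tail.
 */
static void *example_sarea_priv(struct drm_device *dev)
{
	struct drm_local_map *sarea = drm_legacy_getsarea(dev);

	if (!sarea)
		return NULL;	/* userspace has not installed a SAREA yet */

	/* For _DRM_SHM maps, sarea->handle is the kernel virtual
	 * address of the shared area. */
	return (char *)sarea->handle + sizeof(struct drm_sarea);
}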