Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards.

/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

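/*
 * One entry per live userspace mapping of a legacy map. Entries live on
 * drm_device::vmalist; the opener's pid is recorded purely for bookkeeping.
 */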
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

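	/*
	 * Pick a per-architecture caching policy: register ranges must stay
	 * uncached unless write-combining was explicitly requested, while
	 * frame buffers prefer write-combining where the CPU supports it.
	 */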
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

/*
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on error.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp)
		goto vm_fault_error;

	if (!dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

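		/*
		 * list_for_each_entry() leaves the cursor at the list head
		 * when no entry matched, so a head comparison detects an
		 * address outside every bound AGP region.
		 */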
		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on error.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

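	/*
	 * map->handle is a vmalloc()ed kernel address for _DRM_SHM maps,
	 * so the faulting offset can be translated via vmalloc_to_page().
	 */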
	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

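	/*
	 * Walk the device's vma list under struct_mutex: count how many
	 * mappings still reference this map and unlink our own entry.
	 */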
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(&dev->pdev->dev,
						  map->size,
						  map->handle,
						  map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on error.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault descriptor.
 * \return 0 on success, or VM_FAULT_SIGBUS on error.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

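	/*
	 * map->offset is a kernel virtual address inside the scatter-gather
	 * area; convert it and the faulting address into a page index into
	 * entry->pagelist.
	 */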
	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

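	/*
	 * An allocation failure is deliberately tolerated here: the vmalist
	 * is only diagnostic bookkeeping, and the mapping itself must not
	 * fail because of it.
	 */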
	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * registers the mapping via drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

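	/*
	 * For read-only DMA buffers, revoke write access from unprivileged
	 * clients in both the VMA flags and the page protection bits.
	 */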
	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/*
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally registers the
 * mapping via drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

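	/*
	 * A non-zero page offset selects a map: vm_pgoff is the lookup
	 * token for the map in dev->map_hash.
	 */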
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't access the bus DMA
			 * address from the CPU, so for memory of type
			 * _DRM_AGP we resolve the real physical pages and
			 * mappings in fault().
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

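	/*
	 * struct_mutex keeps the map lookup and vmalist update atomic with
	 * respect to map teardown.
	 */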
	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif