Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *  psb GEM interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (c) 2011, Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Authors: Alan Cox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * TODO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *	-	we need to work out if the MMU is relevant (eg for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *		accelerated operations on a GEM object)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <drm/drm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <drm/drm_vma_manager.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include "psb_drv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
/*
 * psb_gem_free_object - final teardown of a psb GEM object
 * @obj: GEM object being destroyed (embedded in a struct gtt_range)
 *
 * Drops the fake mmap offset, releases the core GEM state, then returns
 * the containing gtt_range to the GTT allocator. The order matters: the
 * gtt_range owns the storage of @obj itself, so freeing it must be last.
 */
void psb_gem_free_object(struct drm_gem_object *obj)
{
	/* The GEM object is embedded inside a gtt_range; recover the container */
	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);

	/* Remove the list map if one is present */
	drm_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);

	/* This must occur last as it frees up the memory of the GEM object */
	psb_gtt_free_range(obj->dev, gtt);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
/*
 * psb_gem_get_aperture - aperture query ioctl (not implemented)
 * @dev: our DRM device
 * @data: ioctl payload (unused)
 * @file: the DRM file of the client (unused)
 *
 * Aperture queries are not supported by this driver; always reports
 * -EINVAL to userspace.
 */
int psb_gem_get_aperture(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	return -EINVAL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  *	psb_gem_create		-	create a mappable object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  *	@file: the DRM file of the client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  *	@dev: our device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  *	@size: the size requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  *	@handlep: returned handle (opaque number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  *	Create a GEM object, fill in the boilerplate and attach a handle to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  *	it so that userspace can speak about it. This does the core work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  *	for the various methods that do/will create GEM objects for things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 		   u32 *handlep, int stolen, u32 align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	struct gtt_range *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	size = roundup(size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	/* Allocate our object - for now a direct gtt range which is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	   stolen memory backed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	if (r == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	/* Initialize the extra goodies GEM needs to do all the hard work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 		psb_gtt_free_range(dev, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 		/* GEM doesn't give an error code so use -ENOMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		dev_err(dev->dev, "GEM init failed for %lld\n", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	/* Limit the object to 32bit mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	/* Give the object a handle so we can carry it more easily */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	ret = drm_gem_handle_create(file, &r->gem, &handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 							&r->gem, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		drm_gem_object_release(&r->gem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 		psb_gtt_free_range(dev, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	/* We have the initial and handle reference but need only one now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	drm_gem_object_put(&r->gem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	*handlep = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91)  *	psb_gem_dumb_create	-	create a dumb buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  *	@drm_file: our client file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  *	@dev: our device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  *	@args: the requested arguments copied from userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  *	Allocate a buffer suitable for use for a frame buffer of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  *	form described by user space. Give userspace a handle by which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  *	to reference it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 			struct drm_mode_create_dumb *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	args->size = args->pitch * args->height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	return psb_gem_create(file, dev, args->size, &args->handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 			      PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
/**
 *	psb_gem_fault		-	pagefault handler for GEM objects
 *	@vmf: fault detail; vmf->vma is the VMA of the faulting GEM mapping
 *
 *	Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 *	does most of the work for us including the actual map/unmap calls
 *	but we need to do the actual page work.
 *
 *	This code eventually needs to handle faulting objects in and out
 *	of the GTT and repacking it when we run out of space. We can put
 *	that off for now and for our simple uses
 *
 *	The VMA was set up by GEM. In doing so it also ensured that the
 *	vma->vm_private_data points to the GEM object that is backing this
 *	mapping.
 *
 *	Returns a VM_FAULT_* code.
 */
vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct gtt_range *r;
	int err;
	vm_fault_t ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = dev->dev_private;

	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */

	/* Make sure we don't parallel update on a fault, nor move or remove
	   something from beneath our feet */
	mutex_lock(&dev_priv->mmap_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (r->mmapping == 0) {
		err = psb_gtt_pin(r);
		if (err < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", err);
			/* vmf_error() converts the errno to a VM_FAULT_* code */
			ret = vmf_error(err);
			goto fail;
		}
		r->mmapping = 1;	/* pinned for the lifetime of the object */
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (r->stolen)
		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(r->pages[page_offset]);
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
	mutex_unlock(&dev_priv->mmap_mutex);

	return ret;
}