Orange Pi 5 kernel

Deprecated Linux 5.10.110 kernel tree for the Orange Pi 5 / 5B / 5 Plus boards. The file shown below is drivers/dma-buf/heaps/cma_heap.c, the DMA-BUF CMA heap exporter.

// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

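/*
 * One cma_heap instance pairs the registered dma_heap with the CMA
 * area it allocates from.
 */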
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

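/*
 * Per-buffer state: cma_pages points at the head of the contiguous
 * CMA block, while pages[] is a flat array of its pagecount page
 * pointers, used to build attachment sg-tables, back mmap faults and
 * feed vmap(). vmap_cnt/vaddr refcount the kernel mapping; lock
 * protects the attachment list and the vmap state.
 */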
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

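/*
 * Per-attachment state: a private sg_table over the buffer's pages,
 * plus a "mapped" flag so the CPU-access hooks only sync attachments
 * that are currently DMA-mapped.
 */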
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

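/*
 * Build this attachment's sg_table from the buffer's page array and
 * link it into the buffer's attachment list.
 */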
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

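/*
 * Map the attachment's sg_table for DMA, forwarding the attachment's
 * dma_map_attrs to the mapping call.
 */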
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int attrs = attachment->dma_map_attrs;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	int attrs = attachment->dma_map_attrs;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

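/*
 * Bracket CPU access: sync every currently mapped attachment for the
 * CPU before access and back to the device afterwards, and keep any
 * kernel vmap range coherent as well.
 */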
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

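/*
 * Userspace mappings are populated lazily: cma_heap_mmap() only
 * installs dma_heap_vm_ops, and each fault pins and returns the
 * backing page for the faulting offset.
 */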
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	/* pages[] holds pagecount entries, so pagecount itself is out of range */
	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

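/*
 * Kernel mappings are refcounted: the first dma_buf_vmap() builds a
 * virtually contiguous mapping with vmap(), later callers share it,
 * and the last dma_buf_vunmap() tears it down.
 */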
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void *cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

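/*
 * Called when the last dma-buf reference drops: warn about (and undo)
 * a leaked kernel mapping, then return the block to the CMA area.
 */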
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

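/*
 * The heap's allocate op: reserve a contiguous block with cma_alloc(),
 * zero it, build the flat page array, and export the result as a
 * dma-buf wired to cma_heap_buf_ops.
 */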
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

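/*
 * Register one CMA area as a dma-heap; it appears to userspace as
 * /dev/dma_heap/<name>, where <name> comes from cma_get_name().
 */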
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");
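
For reference, here is a minimal userspace sketch of exercising this heap through the standard DMA-BUF heap uAPI from <linux/dma-heap.h>. The device node name is an assumption: the default CMA area usually surfaces as /dev/dma_heap/reserved, but the actual name is whatever cma_get_name() returns in __add_cma_heap() above.

/* build: gcc -o cma_alloc_demo cma_alloc_demo.c */
#include <fcntl.h>
#include <linux/dma-heap.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* "reserved" is an assumed heap name; see cma_get_name() above. */
	int heap = open("/dev/dma_heap/reserved", O_RDWR | O_CLOEXEC);
	struct dma_heap_allocation_data alloc = {
		.len = 16 * 4096,		/* cma_heap_allocate() page-aligns len */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* becomes exp_info.flags */
		/* heap_flags stays 0: the core accepts no heap flags */
	};
	void *p;

	if (heap < 0 || ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
		perror("dma-heap alloc");
		return 1;
	}

	/* MAP_SHARED is required: cma_heap_mmap() rejects private mappings. */
	p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		 alloc.fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, alloc.len);	/* faults pages in via cma_heap_vm_fault() */

	munmap(p, alloc.len);
	close(alloc.fd);
	close(heap);
	return 0;
}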