Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "page_pool.h"
#include "deferred-free-helper.h"

static struct dma_heap *sys_heap;
static struct dma_heap *sys_uncached_heap;

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
	struct deferred_freelist_item deferred_free;

	bool uncached;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;

	bool uncached;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
struct dmabuf_page_pool *pools[NUM_ORDERS];

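/*
 * Clone the buffer's sg_table so each attachment gets private sg entries
 * to map with; the underlying pages stay shared, only the scatterlist
 * bookkeeping is duplicated.
 */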
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

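/*
 * dma-buf attach: track each attaching device with its own sg_table copy
 * on the buffer's attachment list, so the CPU-access syncs below can
 * reach every active mapping.
 */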
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	a->uncached = buffer->uncached;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

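/*
 * Map an attachment's private sg_table for DMA. Uncached buffers skip
 * the CPU cache maintenance: they were flushed once at allocation time
 * and the CPU only touches them through write-combined mappings.
 */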
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int attr = attachment->dma_map_attrs;
	int ret;

	if (a->uncached)
		attr |= DMA_ATTR_SKIP_CPU_SYNC;

	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	int attr = attachment->dma_map_attrs;

	if (a->uncached)
		attr |= DMA_ATTR_SKIP_CPU_SYNC;
	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, attr);
}

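/*
 * begin/end_cpu_access: keep the kernel vmap range and every mapped
 * attachment coherent around CPU access. Cached buffers need explicit
 * syncs; uncached buffers do not.
 */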
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	if (!buffer->uncached) {
		list_for_each_entry(a, &buffer->attachments, list) {
			if (!a->mapped)
				continue;
			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
		}
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	if (!buffer->uncached) {
		list_for_each_entry(a, &buffer->attachments, list) {
			if (!a->mapped)
				continue;
			dma_sync_sgtable_for_device(a->dev, a->table, direction);
		}
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

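/*
 * mmap the buffer into userspace one PAGE_SIZE step at a time, walking
 * higher-order pages page by page; uncached buffers get a write-combined
 * mapping.
 */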
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	if (buffer->uncached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

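/*
 * Build a contiguous kernel mapping of the (possibly scattered) buffer:
 * collect every page from the sg_table into an array and vmap() it.
 */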
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	pgprot_t pgprot = PAGE_KERNEL;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->uncached)
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void *system_heap_vmap(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

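/*
 * Zero every page so no stale contents leak when the pages are recycled
 * through the pools to the next allocation.
 */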
static int system_heap_zero_buffer(struct system_heap_buffer *buffer)
{
	struct sg_table *sgt = &buffer->sg_table;
	struct sg_page_iter piter;
	struct page *p;
	void *vaddr;
	int ret = 0;

	for_each_sgtable_page(sgt, &piter, 0) {
		p = sg_page_iter_page(&piter);
		vaddr = kmap_atomic(p);
		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr);
	}

	return ret;
}

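/*
 * Deferred-free callback: normally the pages are zeroed and returned to
 * the matching order pool; under memory pressure (or if zeroing fails)
 * they are freed back to the system instead.
 */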
static void system_heap_buf_free(struct deferred_freelist_item *item,
				 enum df_reason reason)
{
	struct system_heap_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, j;

	buffer = container_of(item, struct system_heap_buffer, deferred_free);
	/* Zero the buffer pages before adding back to the pool */
	if (reason == DF_NORMAL)
		if (system_heap_zero_buffer(buffer))
			reason = DF_UNDER_PRESSURE; // On failure, just free

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		if (reason == DF_UNDER_PRESSURE) {
			__free_pages(page, compound_order(page));
		} else {
			for (j = 0; j < NUM_ORDERS; j++) {
				if (compound_order(page) == orders[j])
					break;
			}
			dmabuf_page_pool_free(pools[j], page);
		}
	}
	sg_free_table(table);
	kfree(buffer);
}

static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;

	deferred_free(&buffer->deferred_free, system_heap_buf_free, npages);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

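/*
 * Return the largest pool page that still fits the remaining size
 * without exceeding max_order, falling through to smaller orders when a
 * pool comes up empty.
 */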
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;
		page = dmabuf_page_pool_alloc(pools[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

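/*
 * Common allocation path for both heaps: gather pages largest-first,
 * assemble them into the buffer's sg_table, then export the dma-buf.
 * Uncached buffers additionally get a one-time map/unmap round trip to
 * flush the zeroed (and thus cached) data out of the CPU caches.
 */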
static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
					       unsigned long len,
					       unsigned long fd_flags,
					       unsigned long heap_flags,
					       bool uncached)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;
	buffer->uncached = uncached;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current))
			goto free_buffer;

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	/*
	 * For uncached buffers, we need to initially flush cpu cache, since
	 * the __GFP_ZERO on the allocation means the zeroing was done by the
	 * cpu and thus it is likely cached. Map (and implicitly flush) and
	 * unmap it now so we don't get corruption later on.
	 */
	if (buffer->uncached) {
		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
	}

	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
}

static long system_get_pool_size(struct dma_heap *heap)
{
	int i;
	long num_pages = 0;
	struct dmabuf_page_pool **pool;

	pool = pools;
	for (i = 0; i < NUM_ORDERS; i++, pool++) {
		num_pages += ((*pool)->count[POOL_LOWPAGE] +
			      (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order;
	}

	return num_pages << PAGE_SHIFT;
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
	.get_pool_size = system_get_pool_size,
};

static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
						     unsigned long len,
						     unsigned long fd_flags,
						     unsigned long heap_flags)
{
	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
}

/* Dummy function to be used until we can call dma_coerce_mask_and_coherent */
static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
							    unsigned long len,
							    unsigned long fd_flags,
							    unsigned long heap_flags)
{
	return ERR_PTR(-EBUSY);
}

static struct dma_heap_ops system_uncached_heap_ops = {
	/* After system_heap_create is complete, we will swap this */
	.allocate = system_uncached_heap_not_initialized,
};

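/*
 * Give the uncached heap's backing device a 64-bit DMA mask and a
 * maximal segment size, so dma_map_sgtable() on its buffers does not
 * have to split segments or bounce.
 */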
static int set_heap_dev_dma(struct device *heap_dev)
{
	int err = 0;

	if (!heap_dev)
		return -EINVAL;

	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));

	if (!heap_dev->dma_parms) {
		heap_dev->dma_parms = devm_kzalloc(heap_dev,
						   sizeof(*heap_dev->dma_parms),
						   GFP_KERNEL);
		if (!heap_dev->dma_parms)
			return -ENOMEM;

		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
		if (err) {
			devm_kfree(heap_dev, heap_dev->dma_parms);
			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
			return err;
		}
	}

	return 0;
}

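/*
 * Module init: create one page pool per supported order, register the
 * cached "system" heap and the "system-uncached" heap, and only enable
 * uncached allocations once the heap device's DMA mask is configured.
 */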
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;
	int i, err = 0;

	for (i = 0; i < NUM_ORDERS; i++) {
		pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]);

		if (!pools[i]) {
			int j;

			pr_err("%s: page pool creation failed!\n", __func__);
			for (j = 0; j < i; j++)
				dmabuf_page_pool_destroy(pools[j]);
			return -ENOMEM;
		}
	}

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	exp_info.name = "system-uncached";
	exp_info.ops = &system_uncached_heap_ops;
	exp_info.priv = NULL;

	sys_uncached_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_uncached_heap))
		return PTR_ERR(sys_uncached_heap);

	err = set_heap_dev_dma(dma_heap_get_dev(sys_uncached_heap));
	if (err)
		return err;

	mb(); /* make sure we only set allocate after dma_mask is set */
	system_uncached_heap_ops.allocate = system_uncached_heap_allocate;

	return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
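
Usage sketch: each heap registered above appears to userspace as a character
device under /dev/dma_heap/ ("system" and "system-uncached"). A minimal
allocation through the standard DMA-BUF heap ioctl could look roughly like
the following; the helper name is illustrative and error handling is kept
minimal.

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/dma-heap.h>

	/* Illustrative helper: returns a dma-buf fd backed by the system heap. */
	static int alloc_from_system_heap(unsigned long len)
	{
		struct dma_heap_allocation_data data = {
			.len = len,
			.fd_flags = O_RDWR | O_CLOEXEC,
		};
		int heap_fd = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);
		int ret;

		if (heap_fd < 0)
			return -1;
		ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
		close(heap_fd);
		if (ret < 0)
			return -1;
		return data.fd;	/* mmap() it, or pass it to a driver */
	}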