// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *      Andrew F. Davis <afd@ti.com>
 *
 * Copyright (C) 2022 Rockchip Electronics Co. Ltd.
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <uapi/linux/rk-dma-heap.h>
#include <linux/proc_fs.h>
#include "../../../mm/cma.h"
#include "rk-dma-heap.h"

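/*
 * Per-heap private data: the exported rk_dma_heap handle and the CMA
 * area that backs all allocations from this heap.
 */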
struct rk_cma_heap {
        struct rk_dma_heap *heap;
        struct cma *cma;
};

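/*
 * One allocated buffer: the backing CMA page block, a flat page array
 * used for vmap(), the list of device attachments, and the kernel
 * mapping refcount/address used for CPU access.
 */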
struct rk_cma_heap_buffer {
        struct rk_cma_heap *heap;
        struct list_head attachments;
        struct mutex lock;
        unsigned long len;
        struct page *cma_pages;
        struct page **pages;
        pgoff_t pagecount;
        int vmap_cnt;
        void *vaddr;
        phys_addr_t phys;
        bool attached;
};

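/* Per-device attachment: a single-entry sg_table covering the whole buffer. */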
struct rk_cma_heap_attachment {
        struct device *dev;
        struct sg_table table;
        struct list_head list;
        bool mapped;
};

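/*
 * Build a one-entry scatterlist spanning the contiguous CMA block for the
 * attaching device and link it into the buffer's attachment list.
 */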
static int rk_cma_heap_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        struct rk_cma_heap_attachment *a;
        struct sg_table *table;
        size_t size = buffer->pagecount << PAGE_SHIFT;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = &a->table;

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(a);
                return ret;
        }
        sg_set_page(table->sgl, buffer->cma_pages, PAGE_ALIGN(size), 0);

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;

        attachment->priv = a;

        buffer->attached = true;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void rk_cma_heap_detach(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attachment)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        struct rk_cma_heap_attachment *a = attachment->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        buffer->attached = false;

        sg_free_table(&a->table);
        kfree(a);
}

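/*
 * Map/unmap the attachment's sg_table for DMA, honouring the caller's
 * dma_map_attrs and remembering whether the table is currently mapped.
 */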
static struct sg_table *rk_cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                                enum dma_data_direction direction)
{
        struct rk_cma_heap_attachment *a = attachment->priv;
        struct sg_table *table = &a->table;
        int attrs = attachment->dma_map_attrs;
        int ret;

        ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
        if (ret)
                return ERR_PTR(-ENOMEM);
        a->mapped = true;
        return table;
}

static void rk_cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *table,
                                      enum dma_data_direction direction)
{
        struct rk_cma_heap_attachment *a = attachment->priv;
        int attrs = attachment->dma_map_attrs;

        a->mapped = false;
        dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

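/*
 * CPU-access synchronization. Mapped attachments are synced through their
 * sg_tables; buffers that have no attachment yet (e.g. only mmap'ed by
 * userspace) are synced by physical address against the heap's own device.
 */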
static int
rk_cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
                                             enum dma_data_direction direction,
                                             unsigned int offset,
                                             unsigned int len)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        struct rk_cma_heap_attachment *a;

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
        }

        /* For userspace that has not attached yet */
        if (buffer->phys && !buffer->attached)
                dma_sync_single_for_cpu(rk_dma_heap_get_dev(buffer->heap->heap),
                                        buffer->phys + offset,
                                        len,
                                        direction);
        mutex_unlock(&buffer->lock);

        return 0;
}

static int
rk_cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
                                           enum dma_data_direction direction,
                                           unsigned int offset,
                                           unsigned int len)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        struct rk_cma_heap_attachment *a;

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->len);

        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_device(a->dev, &a->table, direction);
        }

        /* For userspace that has not attached yet */
        if (buffer->phys && !buffer->attached)
                dma_sync_single_for_device(rk_dma_heap_get_dev(buffer->heap->heap),
                                           buffer->phys + offset,
                                           len,
                                           direction);
        mutex_unlock(&buffer->lock);

        return 0;
}

static int rk_cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                                enum dma_data_direction dir)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        unsigned int len = buffer->pagecount * PAGE_SIZE;

        return rk_cma_heap_dma_buf_begin_cpu_access_partial(dmabuf, dir, 0, len);
}

static int rk_cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                              enum dma_data_direction dir)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        unsigned int len = buffer->pagecount * PAGE_SIZE;

        return rk_cma_heap_dma_buf_end_cpu_access_partial(dmabuf, dir, 0, len);
}

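/*
 * Map the whole physically contiguous buffer into userspace with
 * remap_pfn_range(), starting at the buffer's physical base address.
 */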
static int rk_cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        size_t size = vma->vm_end - vma->vm_start;
        int ret;

        ret = remap_pfn_range(vma, vma->vm_start, __phys_to_pfn(buffer->phys),
                              size, vma->vm_page_prot);
        if (ret)
                return -EAGAIN;

        return 0;
}

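/*
 * Kernel mapping helpers: vmap() the page array on first use, reuse and
 * refcount the existing mapping on nested requests, and tear it down when
 * the last user calls vunmap.
 */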
static void *rk_cma_heap_do_vmap(struct rk_cma_heap_buffer *buffer)
{
        void *vaddr;
        pgprot_t pgprot = PAGE_KERNEL;

        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

static void *rk_cma_heap_vmap(struct dma_buf *dmabuf)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        void *vaddr;

        mutex_lock(&buffer->lock);
        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                vaddr = buffer->vaddr;
                goto out;
        }

        vaddr = rk_cma_heap_do_vmap(buffer);
        if (IS_ERR(vaddr))
                goto out;

        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
out:
        mutex_unlock(&buffer->lock);

        return vaddr;
}

static void rk_cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
        mutex_unlock(&buffer->lock);
}

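/*
 * Bookkeeping for exported dmabufs: each allocation is recorded on the
 * heap's dmabuf_list with its physical range so it can be reported and
 * removed again on release.
 */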
static void rk_cma_heap_remove_dmabuf_list(struct dma_buf *dmabuf)
{
        struct rk_dma_heap_dmabuf *buf;
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        struct rk_cma_heap *cma_heap = buffer->heap;
        struct rk_dma_heap *heap = cma_heap->heap;

        mutex_lock(&heap->dmabuf_lock);
        list_for_each_entry(buf, &heap->dmabuf_list, node) {
                if (buf->dmabuf == dmabuf) {
                        dma_heap_print("<%s> free dmabuf<ino-%ld>@[%pa-%pa] to heap-<%s>\n",
                                       dmabuf->name,
                                       dmabuf->file->f_inode->i_ino,
                                       &buf->start, &buf->end,
                                       rk_dma_heap_get_name(heap));
                        list_del(&buf->node);
                        kfree(buf);
                        break;
                }
        }
        mutex_unlock(&heap->dmabuf_lock);
}

static int rk_cma_heap_add_dmabuf_list(struct dma_buf *dmabuf, const char *name)
{
        struct rk_dma_heap_dmabuf *buf;
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        struct rk_cma_heap *cma_heap = buffer->heap;
        struct rk_dma_heap *heap = cma_heap->heap;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        INIT_LIST_HEAD(&buf->node);
        buf->dmabuf = dmabuf;
        buf->start = buffer->phys;
        buf->end = buf->start + buffer->len - 1;
        mutex_lock(&heap->dmabuf_lock);
        list_add_tail(&buf->node, &heap->dmabuf_list);
        mutex_unlock(&heap->dmabuf_lock);

        dma_heap_print("<%s> alloc dmabuf<ino-%ld>@[%pa-%pa] from heap-<%s>\n",
                       dmabuf->name, dmabuf->file->f_inode->i_ino,
                       &buf->start, &buf->end, rk_dma_heap_get_name(heap));

        return 0;
}

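/*
 * Bookkeeping for raw contiguous page allocations (no dmabuf): entries on
 * the heap's contig_list carry the allocator name and physical range.
 */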
static int rk_cma_heap_remove_contig_list(struct rk_dma_heap *heap,
                                          struct page *page, const char *name)
{
        struct rk_dma_heap_contig_buf *buf;

        mutex_lock(&heap->contig_lock);
        list_for_each_entry(buf, &heap->contig_list, node) {
                if (buf->start == page_to_phys(page)) {
                        dma_heap_print("<%s> free contig-buf@[%pa-%pa] to heap-<%s>\n",
                                       buf->orig_alloc, &buf->start, &buf->end,
                                       rk_dma_heap_get_name(heap));
                        list_del(&buf->node);
                        kfree(buf->orig_alloc);
                        kfree(buf);
                        break;
                }
        }
        mutex_unlock(&heap->contig_lock);

        return 0;
}

static int rk_cma_heap_add_contig_list(struct rk_dma_heap *heap,
                                       struct page *page, unsigned long size,
                                       const char *name)
{
        struct rk_dma_heap_contig_buf *buf;
        const char *name_tmp;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        INIT_LIST_HEAD(&buf->node);
        if (!name)
                name_tmp = current->comm;
        else
                name_tmp = name;

        buf->orig_alloc = kstrndup(name_tmp, RK_DMA_HEAP_NAME_LEN, GFP_KERNEL);
        if (!buf->orig_alloc) {
                kfree(buf);
                return -ENOMEM;
        }

        buf->start = page_to_phys(page);
        buf->end = buf->start + size - 1;
        mutex_lock(&heap->contig_lock);
        list_add_tail(&buf->node, &heap->contig_list);
        mutex_unlock(&heap->contig_lock);

        dma_heap_print("<%s> alloc contig-buf@[%pa-%pa] from heap-<%s>\n",
                       buf->orig_alloc, &buf->start, &buf->end,
                       rk_dma_heap_get_name(heap));

        return 0;
}

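/*
 * Final release: drop any leftover kernel mapping, remove the bookkeeping
 * entry, free the page array and return the pages to the CMA area.
 */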
static void rk_cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct rk_cma_heap_buffer *buffer = dmabuf->priv;
        struct rk_cma_heap *cma_heap = buffer->heap;
        struct rk_dma_heap *heap = cma_heap->heap;

        if (buffer->vmap_cnt > 0) {
                WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
                vunmap(buffer->vaddr);
        }

        rk_cma_heap_remove_dmabuf_list(dmabuf);

        /* free page list */
        kfree(buffer->pages);
        /* release memory */
        cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
        rk_dma_heap_total_dec(heap, buffer->len);

        kfree(buffer);
}

static const struct dma_buf_ops rk_cma_heap_buf_ops = {
        .cache_sgt_mapping = true,
        .attach = rk_cma_heap_attach,
        .detach = rk_cma_heap_detach,
        .map_dma_buf = rk_cma_heap_map_dma_buf,
        .unmap_dma_buf = rk_cma_heap_unmap_dma_buf,
        .begin_cpu_access = rk_cma_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = rk_cma_heap_dma_buf_end_cpu_access,
        .begin_cpu_access_partial = rk_cma_heap_dma_buf_begin_cpu_access_partial,
        .end_cpu_access_partial = rk_cma_heap_dma_buf_end_cpu_access_partial,
        .mmap = rk_cma_heap_mmap,
        .vmap = rk_cma_heap_vmap,
        .vunmap = rk_cma_heap_vunmap,
        .release = rk_cma_heap_dma_buf_release,
};

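/*
 * Allocate a physically contiguous buffer from the CMA area, zero it,
 * build the page array, export it as a dmabuf and record it on the
 * heap's dmabuf list.
 */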
static struct dma_buf *rk_cma_heap_allocate(struct rk_dma_heap *heap,
                                            unsigned long len,
                                            unsigned long fd_flags,
                                            unsigned long heap_flags,
                                            const char *name)
{
        struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
        struct rk_cma_heap_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        size_t size = PAGE_ALIGN(len);
        pgoff_t pagecount = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        struct page *cma_pages;
        struct dma_buf *dmabuf;
        pgoff_t pg;
        int ret = -ENOMEM;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->len = size;

        if (align > CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT)
                align = CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT;

        cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
        if (!cma_pages)
                goto free_buffer;

        /* Clear the cma pages */
        if (PageHighMem(cma_pages)) {
                unsigned long nr_clear_pages = pagecount;
                struct page *page = cma_pages;

                while (nr_clear_pages > 0) {
                        void *vaddr = kmap_atomic(page);

                        memset(vaddr, 0, PAGE_SIZE);
                        kunmap_atomic(vaddr);
                        /*
                         * Avoid wasting time zeroing memory if the process
                         * has been killed by SIGKILL
                         */
                        if (fatal_signal_pending(current))
                                goto free_cma;
                        page++;
                        nr_clear_pages--;
                }
        } else {
                memset(page_address(cma_pages), 0, size);
        }

        buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages),
                                      GFP_KERNEL);
        if (!buffer->pages) {
                ret = -ENOMEM;
                goto free_cma;
        }

        for (pg = 0; pg < pagecount; pg++)
                buffer->pages[pg] = &cma_pages[pg];

        buffer->cma_pages = cma_pages;
        buffer->heap = cma_heap;
        buffer->pagecount = pagecount;

        /* create the dmabuf */
        exp_info.exp_name = rk_dma_heap_get_name(heap);
        exp_info.ops = &rk_cma_heap_buf_ops;
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_pages;
        }

        buffer->phys = page_to_phys(cma_pages);
        dma_sync_single_for_cpu(rk_dma_heap_get_dev(heap), buffer->phys,
                                buffer->pagecount * PAGE_SIZE,
                                DMA_FROM_DEVICE);

        ret = rk_cma_heap_add_dmabuf_list(dmabuf, name);
        if (ret)
                goto fail_dma_buf;

        rk_dma_heap_total_inc(heap, buffer->len);

        return dmabuf;

fail_dma_buf:
        dma_buf_put(dmabuf);
free_pages:
        kfree(buffer->pages);
free_cma:
        cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
        kfree(buffer);

        return ERR_PTR(ret);
}

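/*
 * Contiguous page interface for in-kernel clients: allocate/free raw CMA
 * pages without exporting a dmabuf, with the same alignment cap and
 * per-heap accounting.
 */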
static struct page *rk_cma_heap_allocate_pages(struct rk_dma_heap *heap,
                                               size_t len, const char *name)
{
        struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
        size_t size = PAGE_ALIGN(len);
        pgoff_t pagecount = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        struct page *page;
        int ret;

        if (align > CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT)
                align = CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT;

        page = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
        if (!page)
                return ERR_PTR(-ENOMEM);

        ret = rk_cma_heap_add_contig_list(heap, page, size, name);
        if (ret) {
                cma_release(cma_heap->cma, page, pagecount);
                return ERR_PTR(-EINVAL);
        }

        rk_dma_heap_total_inc(heap, size);

        return page;
}

static void rk_cma_heap_free_pages(struct rk_dma_heap *heap,
                                   struct page *page, size_t len,
                                   const char *name)
{
        struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
        pgoff_t pagecount = len >> PAGE_SHIFT;

        rk_cma_heap_remove_contig_list(heap, page, name);

        cma_release(cma_heap->cma, page, pagecount);

        rk_dma_heap_total_dec(heap, len);
}

static const struct rk_dma_heap_ops rk_cma_heap_ops = {
        .allocate = rk_cma_heap_allocate,
        .alloc_contig_pages = rk_cma_heap_allocate_pages,
        .free_contig_pages = rk_cma_heap_free_pages,
};

static int cma_procfs_show(struct seq_file *s, void *private);

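/*
 * Register one CMA area as an rk_dma_heap and, if the heap has a procfs
 * directory, expose its allocation bitmap there.
 */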
static int __rk_add_cma_heap(struct cma *cma, void *data)
{
        struct rk_cma_heap *cma_heap;
        struct rk_dma_heap_export_info exp_info;

        cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return -ENOMEM;
        cma_heap->cma = cma;

        exp_info.name = cma_get_name(cma);
        exp_info.ops = &rk_cma_heap_ops;
        exp_info.priv = cma_heap;
        exp_info.support_cma = true;

        cma_heap->heap = rk_dma_heap_add(&exp_info);
        if (IS_ERR(cma_heap->heap)) {
                int ret = PTR_ERR(cma_heap->heap);

                kfree(cma_heap);
                return ret;
        }

        if (cma_heap->heap->procfs)
                proc_create_single_data("alloc_bitmap", 0, cma_heap->heap->procfs,
                                        cma_procfs_show, cma);

        return 0;
}

static int __init rk_add_default_cma_heap(void)
{
        struct cma *cma = rk_dma_heap_get_cma();

        if (WARN_ON(!cma))
                return -EINVAL;

        return __rk_add_cma_heap(cma, NULL);
}

#if defined(CONFIG_VIDEO_ROCKCHIP_THUNDER_BOOT_ISP) && !defined(CONFIG_INITCALL_ASYNC)
subsys_initcall(rk_add_default_cma_heap);
#else
module_init(rk_add_default_cma_heap);
#endif

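/*
 * procfs helpers: dump the CMA bitmap as rows of hexadecimal words plus
 * total/used counters in KiB.
 */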
static void cma_procfs_format_array(char *buf, size_t bufsize, u32 *array, int array_size)
{
        int i = 0;

        while (--array_size >= 0) {
                size_t len;
                char term = (array_size && (++i % 8)) ? ' ' : '\n';

                len = snprintf(buf, bufsize, "%08X%c", *array++, term);
                buf += len;
                bufsize -= len;
        }
}

static void cma_procfs_show_bitmap(struct seq_file *s, struct cma *cma)
{
        int elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
        int size = elements * 9;
        u32 *array = (u32 *)cma->bitmap;
        char *buf;

        buf = kmalloc(size + 1, GFP_KERNEL);
        if (!buf)
                return;

        buf[size] = 0;

        cma_procfs_format_array(buf, size + 1, array, elements);
        seq_printf(s, "%s", buf);
        kfree(buf);
}

static u64 cma_procfs_used_get(struct cma *cma)
{
        unsigned long used;

        mutex_lock(&cma->lock);
        used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
        mutex_unlock(&cma->lock);

        return (u64)used << cma->order_per_bit;
}

static int cma_procfs_show(struct seq_file *s, void *private)
{
        struct cma *cma = s->private;
        u64 used = cma_procfs_used_get(cma);

        seq_printf(s, "Total: %lu KiB\n", cma->count << (PAGE_SHIFT - 10));
        seq_printf(s, " Used: %llu KiB\n\n", used << (PAGE_SHIFT - 10));

        cma_procfs_show_bitmap(s, cma);

        return 0;
}

MODULE_DESCRIPTION("RockChip DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");