// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 * Copyright (C) 2022 Rockchip Electronics Co. Ltd.
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/cma.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <uapi/linux/rk-dma-heap.h>

#include "rk-dma-heap.h"

#define DEVNAME "rk_dma_heap"

#define NUM_HEAP_MINORS 128

static LIST_HEAD(rk_heap_list);
static DEFINE_MUTEX(rk_heap_list_lock);
static dev_t rk_dma_heap_devt;
static struct class *rk_dma_heap_class;
static DEFINE_XARRAY_ALLOC(rk_dma_heap_minors);
struct proc_dir_entry *proc_rk_dma_heap_dir;

#define K(size) ((unsigned long)((size) >> 10))

static int rk_vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
{
	struct rk_vmap_pfn_data *data = private;

	*pte = pte_mkspecial(pfn_pte(data->pfn++, data->prot));
	return 0;
}

void *rk_vmap_contig_pfn(unsigned long pfn, unsigned int count, pgprot_t prot)
{
	struct rk_vmap_pfn_data data = { .pfn = pfn, .prot = pgprot_nx(prot) };
	struct vm_struct *area;

	area = get_vm_area_caller(count * PAGE_SIZE, VM_MAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				count * PAGE_SIZE, rk_vmap_pfn_apply, &data)) {
		free_vm_area(area);
		return NULL;
	}
	return area->addr;
}
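
/*
 * Example (illustrative sketch, not part of this file): mapping a
 * physically contiguous range into kernel virtual address space with
 * rk_vmap_contig_pfn() and tearing it down again with vunmap(). The
 * page source is hypothetical; only the pairing is the point.
 *
 *	struct page *pages;	// physically contiguous allocation
 *	unsigned int count = len >> PAGE_SHIFT;
 *	void *vaddr;
 *
 *	vaddr = rk_vmap_contig_pfn(page_to_pfn(pages), count, PAGE_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	// ... CPU access through vaddr ...
 *	vunmap(vaddr);
 */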

int rk_dma_heap_set_dev(struct device *heap_dev)
{
	int err = 0;

	if (!heap_dev)
		return -EINVAL;

	err = dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(heap_dev, "Failed to set DMA mask, err:%d\n", err);
		return err;
	}

	if (!heap_dev->dma_parms) {
		heap_dev->dma_parms = devm_kzalloc(heap_dev,
						   sizeof(*heap_dev->dma_parms),
						   GFP_KERNEL);
		if (!heap_dev->dma_parms)
			return -ENOMEM;

		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
		if (err) {
			devm_kfree(heap_dev, heap_dev->dma_parms);
			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_set_dev);
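
/*
 * Example (illustrative sketch): a heap subdriver typically calls
 * rk_dma_heap_set_dev() once on the struct device it will use for
 * dma-buf attachments, so the device carries a 64-bit DMA mask and an
 * unbounded segment size. The probe function here is hypothetical.
 *
 *	static int rk_example_heap_probe(struct platform_device *pdev)
 *	{
 *		int ret = rk_dma_heap_set_dev(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *		// ... register the heap ...
 *		return 0;
 *	}
 */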

struct rk_dma_heap *rk_dma_heap_find(const char *name)
{
	struct rk_dma_heap *h;

	mutex_lock(&rk_heap_list_lock);
	list_for_each_entry(h, &rk_heap_list, list) {
		if (!strcmp(h->name, name)) {
			kref_get(&h->refcount);
			mutex_unlock(&rk_heap_list_lock);
			return h;
		}
	}
	mutex_unlock(&rk_heap_list_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_find);
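
/*
 * Example (illustrative sketch): rk_dma_heap_find() takes a reference
 * on the heap it returns, so every successful lookup must be balanced
 * with rk_dma_heap_put(). The heap name used here is hypothetical.
 *
 *	struct rk_dma_heap *heap = rk_dma_heap_find("rk-dma-heap-cma");
 *
 *	if (!heap)
 *		return -ENODEV;
 *	// ... allocate from the heap ...
 *	rk_dma_heap_put(heap);
 */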

void rk_dma_heap_buffer_free(struct dma_buf *dmabuf)
{
	dma_buf_put(dmabuf);
}
EXPORT_SYMBOL_GPL(rk_dma_heap_buffer_free);

struct dma_buf *rk_dma_heap_buffer_alloc(struct rk_dma_heap *heap, size_t len,
					 unsigned int fd_flags,
					 unsigned int heap_flags,
					 const char *name)
{
	struct dma_buf *dmabuf;

	if (fd_flags & ~RK_DMA_HEAP_VALID_FD_FLAGS)
		return ERR_PTR(-EINVAL);

	if (heap_flags & ~RK_DMA_HEAP_VALID_HEAP_FLAGS)
		return ERR_PTR(-EINVAL);

	/*
	 * Allocations from all heaps have to begin
	 * and end on page boundaries.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return ERR_PTR(-EINVAL);

	dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags, name);

	if (IS_ENABLED(CONFIG_DMABUF_RK_HEAPS_DEBUG) && !IS_ERR(dmabuf))
		dma_buf_set_name(dmabuf, name);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_buffer_alloc);

int rk_dma_heap_bufferfd_alloc(struct rk_dma_heap *heap, size_t len,
			       unsigned int fd_flags,
			       unsigned int heap_flags,
			       const char *name)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = rk_dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags,
					  name);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, fd_flags);
	if (fd < 0) {
		/* just return, as put will call release and that will free */
		dma_buf_put(dmabuf);
	}

	return fd;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_bufferfd_alloc);
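
/*
 * Example (illustrative sketch): an in-kernel client allocating a
 * dma-buf and installing it as a file descriptor in one step. The flag
 * values mirror the checks in rk_dma_heap_buffer_alloc(); the length
 * and buffer name are hypothetical.
 *
 *	int fd = rk_dma_heap_bufferfd_alloc(heap, SZ_1M,
 *					    O_RDWR | O_CLOEXEC, 0,
 *					    "example-buffer");
 *
 *	if (fd < 0)
 *		return fd;
 *	// fd now holds the only reference; closing it frees the buffer
 */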

struct page *rk_dma_heap_alloc_contig_pages(struct rk_dma_heap *heap,
					    size_t len, const char *name)
{
	if (WARN_ON(!heap->support_cma))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);
	if (!len)
		return ERR_PTR(-EINVAL);

	return heap->ops->alloc_contig_pages(heap, len, name);
}
EXPORT_SYMBOL_GPL(rk_dma_heap_alloc_contig_pages);

void rk_dma_heap_free_contig_pages(struct rk_dma_heap *heap,
				   struct page *pages, size_t len,
				   const char *name)
{
	if (WARN_ON(!heap->support_cma))
		return;

	heap->ops->free_contig_pages(heap, pages, len, name);
}
EXPORT_SYMBOL_GPL(rk_dma_heap_free_contig_pages);
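
/*
 * Example (illustrative sketch): the non-dmabuf path hands out raw
 * contiguous pages from a CMA-backed heap; an allocation must be
 * returned through rk_dma_heap_free_contig_pages() with the same
 * length and name. The size and name here are hypothetical.
 *
 *	struct page *pages;
 *
 *	pages = rk_dma_heap_alloc_contig_pages(heap, SZ_4M, "example-fw");
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// ... use the physically contiguous range ...
 *	rk_dma_heap_free_contig_pages(heap, pages, SZ_4M, "example-fw");
 */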

void rk_dma_heap_total_inc(struct rk_dma_heap *heap, size_t len)
{
	mutex_lock(&rk_heap_list_lock);
	heap->total_size += len;
	mutex_unlock(&rk_heap_list_lock);
}

void rk_dma_heap_total_dec(struct rk_dma_heap *heap, size_t len)
{
	mutex_lock(&rk_heap_list_lock);
	if (WARN_ON(heap->total_size < len))
		heap->total_size = 0;
	else
		heap->total_size -= len;
	mutex_unlock(&rk_heap_list_lock);
}

static int rk_dma_heap_open(struct inode *inode, struct file *file)
{
	struct rk_dma_heap *heap;

	heap = xa_load(&rk_dma_heap_minors, iminor(inode));
	if (!heap) {
		pr_err("rk_dma_heap: minor %d unknown.\n", iminor(inode));
		return -ENODEV;
	}

	/* instance data as context */
	file->private_data = heap;
	nonseekable_open(inode, file);

	return 0;
}

static long rk_dma_heap_ioctl_allocate(struct file *file, void *data)
{
	struct rk_dma_heap_allocation_data *heap_allocation = data;
	struct rk_dma_heap *heap = file->private_data;
	int fd;

	if (heap_allocation->fd)
		return -EINVAL;

	fd = rk_dma_heap_bufferfd_alloc(heap, heap_allocation->len,
					heap_allocation->fd_flags,
					heap_allocation->heap_flags, NULL);
	if (fd < 0)
		return fd;

	heap_allocation->fd = fd;

	return 0;
}

static unsigned int rk_dma_heap_ioctl_cmds[] = {
	RK_DMA_HEAP_IOCTL_ALLOC,
};

static long rk_dma_heap_ioctl(struct file *file, unsigned int ucmd,
			      unsigned long arg)
{
	char stack_kdata[128];
	char *kdata = stack_kdata;
	unsigned int kcmd;
	unsigned int in_size, out_size, drv_size, ksize;
	int nr = _IOC_NR(ucmd);
	int ret = 0;

	if (nr >= ARRAY_SIZE(rk_dma_heap_ioctl_cmds))
		return -EINVAL;

	/* Get the kernel ioctl cmd that matches */
	kcmd = rk_dma_heap_ioctl_cmds[nr];

	/* Figure out the delta between user cmd size and kernel cmd size */
	drv_size = _IOC_SIZE(kcmd);
	out_size = _IOC_SIZE(ucmd);
	in_size = out_size;
	if ((ucmd & kcmd & IOC_IN) == 0)
		in_size = 0;
	if ((ucmd & kcmd & IOC_OUT) == 0)
		out_size = 0;
	ksize = max(max(in_size, out_size), drv_size);

	/* If necessary, allocate buffer for ioctl argument */
	if (ksize > sizeof(stack_kdata)) {
		kdata = kmalloc(ksize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
		ret = -EFAULT;
		goto err;
	}

	/* zero out any difference between the kernel/user structure size */
	if (ksize > in_size)
		memset(kdata + in_size, 0, ksize - in_size);

	switch (kcmd) {
	case RK_DMA_HEAP_IOCTL_ALLOC:
		ret = rk_dma_heap_ioctl_allocate(file, kdata);
		break;
	default:
		ret = -ENOTTY;
		goto err;
	}

	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
		ret = -EFAULT;
err:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}

static const struct file_operations rk_dma_heap_fops = {
	.owner		= THIS_MODULE,
	.open		= rk_dma_heap_open,
	.unlocked_ioctl	= rk_dma_heap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= rk_dma_heap_ioctl,
#endif
};
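
/*
 * Example (illustrative sketch): the userspace view of the dispatcher
 * above. A process opens the heap's character device and issues
 * RK_DMA_HEAP_IOCTL_ALLOC with a zeroed fd field, exactly as
 * rk_dma_heap_ioctl_allocate() expects. The device path's heap name is
 * hypothetical; needs <fcntl.h>, <sys/ioctl.h> and the uapi header.
 *
 *	struct rk_dma_heap_allocation_data data = {
 *		.len = 1024 * 1024,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/rk_dma_heap/rk-dma-heap-cma", O_RDONLY);
 *
 *	if (heap_fd < 0)
 *		return -1;
 *	if (ioctl(heap_fd, RK_DMA_HEAP_IOCTL_ALLOC, &data) < 0)
 *		return -1;
 *	// data.fd now refers to the new dma-buf
 */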

/**
 * rk_dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *rk_dma_heap_get_drvdata(struct rk_dma_heap *heap)
{
	return heap->priv;
}

static void rk_dma_heap_release(struct kref *ref)
{
	struct rk_dma_heap *heap = container_of(ref, struct rk_dma_heap, refcount);
	int minor = MINOR(heap->heap_devt);

	/* Note, we are already holding the rk_heap_list_lock here */
	list_del(&heap->list);

	device_destroy(rk_dma_heap_class, heap->heap_devt);
	cdev_del(&heap->heap_cdev);
	xa_erase(&rk_dma_heap_minors, minor);

	kfree(heap);
}

void rk_dma_heap_put(struct rk_dma_heap *h)
{
	/*
	 * Take the rk_heap_list_lock now to avoid racing with code
	 * scanning the list and then taking a kref.
	 */
	mutex_lock(&rk_heap_list_lock);
	kref_put(&h->refcount, rk_dma_heap_release);
	mutex_unlock(&rk_heap_list_lock);
}

/**
 * rk_dma_heap_get_dev() - get device struct for the heap
 * @heap: DMA-Heap to retrieve device struct from
 *
 * Returns:
 * The device struct for the heap.
 */
struct device *rk_dma_heap_get_dev(struct rk_dma_heap *heap)
{
	return heap->heap_dev;
}

/**
 * rk_dma_heap_get_name() - get heap name
 * @heap: DMA-Heap to retrieve the name from
 *
 * Returns:
 * The char* for the heap name.
 */
const char *rk_dma_heap_get_name(struct rk_dma_heap *heap)
{
	return heap->name;
}

struct rk_dma_heap *rk_dma_heap_add(const struct rk_dma_heap_export_info *exp_info)
{
	struct rk_dma_heap *heap, *err_ret;
	unsigned int minor;
	int ret;

	if (!exp_info->name || !strcmp(exp_info->name, "")) {
		pr_err("rk_dma_heap: Cannot add heap without a name\n");
		return ERR_PTR(-EINVAL);
	}

	if (!exp_info->ops || !exp_info->ops->allocate) {
		pr_err("rk_dma_heap: Cannot add heap with invalid ops struct\n");
		return ERR_PTR(-EINVAL);
	}

	/* check the name is unique */
	heap = rk_dma_heap_find(exp_info->name);
	if (heap) {
		pr_err("rk_dma_heap: Already registered heap named %s\n",
		       exp_info->name);
		rk_dma_heap_put(heap);
		return ERR_PTR(-EINVAL);
	}

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	kref_init(&heap->refcount);
	heap->name = exp_info->name;
	heap->ops = exp_info->ops;
	heap->priv = exp_info->priv;
	heap->support_cma = exp_info->support_cma;
	INIT_LIST_HEAD(&heap->dmabuf_list);
	INIT_LIST_HEAD(&heap->contig_list);
	mutex_init(&heap->dmabuf_lock);
	mutex_init(&heap->contig_lock);

	/* Find unused minor number */
	ret = xa_alloc(&rk_dma_heap_minors, &minor, heap,
		       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
	if (ret < 0) {
		pr_err("rk_dma_heap: Unable to get minor number for heap\n");
		err_ret = ERR_PTR(ret);
		goto err0;
	}

	/* Create device */
	heap->heap_devt = MKDEV(MAJOR(rk_dma_heap_devt), minor);

	cdev_init(&heap->heap_cdev, &rk_dma_heap_fops);
	ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
	if (ret < 0) {
		pr_err("rk_dma_heap: Unable to add char device\n");
		err_ret = ERR_PTR(ret);
		goto err1;
	}

	heap->heap_dev = device_create(rk_dma_heap_class,
				       NULL,
				       heap->heap_devt,
				       NULL,
				       heap->name);
	if (IS_ERR(heap->heap_dev)) {
		pr_err("rk_dma_heap: Unable to create device\n");
		err_ret = ERR_CAST(heap->heap_dev);
		goto err2;
	}

	heap->procfs = proc_rk_dma_heap_dir;

	/* Make sure it doesn't disappear on us */
	heap->heap_dev = get_device(heap->heap_dev);

	/* Add heap to the list */
	mutex_lock(&rk_heap_list_lock);
	list_add(&heap->list, &rk_heap_list);
	mutex_unlock(&rk_heap_list_lock);

	return heap;

err2:
	cdev_del(&heap->heap_cdev);
err1:
	xa_erase(&rk_dma_heap_minors, minor);
err0:
	kfree(heap);
	return err_ret;
}
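
/*
 * Example (illustrative sketch): how a heap subdriver registers itself
 * through rk_dma_heap_add(). Only the fields consumed above (name,
 * ops, priv, support_cma) are shown; the ops type name, callback and
 * heap name are assumptions for illustration.
 *
 *	static const struct rk_dma_heap_ops rk_example_heap_ops = {
 *		.allocate = rk_example_heap_allocate,	// mandatory
 *	};
 *
 *	struct rk_dma_heap_export_info exp_info = {
 *		.name = "rk-example-heap",
 *		.ops = &rk_example_heap_ops,
 *		.priv = drv_data,
 *		.support_cma = true,
 *	};
 *	struct rk_dma_heap *heap = rk_dma_heap_add(&exp_info);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */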

static char *rk_dma_heap_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "rk_dma_heap/%s", dev_name(dev));
}

static int rk_dma_heap_dump_dmabuf(const struct dma_buf *dmabuf, void *data)
{
	struct rk_dma_heap *heap = (struct rk_dma_heap *)data;
	struct rk_dma_heap_dmabuf *buf;
	struct dma_buf_attachment *a;
	phys_addr_t size;
	int attach_count;
	int ret;

	if (!strcmp(dmabuf->exp_name, heap->name)) {
		seq_printf(heap->s, "dma-heap:<%s> -dmabuf", heap->name);
		mutex_lock(&heap->dmabuf_lock);
		list_for_each_entry(buf, &heap->dmabuf_list, node) {
			if (buf->dmabuf->file->f_inode->i_ino ==
			    dmabuf->file->f_inode->i_ino) {
				seq_printf(heap->s,
					   "\ti_ino = %ld\n",
					   dmabuf->file->f_inode->i_ino);
				size = buf->end - buf->start + 1;
				seq_printf(heap->s,
					   "\tAlloc by (%-20s)\t[%pa-%pa]\t%pa (%lu KiB)\n",
					   dmabuf->name, &buf->start,
					   &buf->end, &size, K(size));
				seq_puts(heap->s, "\t\tAttached Devices:\n");
				attach_count = 0;
				ret = dma_resv_lock_interruptible(dmabuf->resv,
								  NULL);
				if (ret)
					goto error_unlock;
				list_for_each_entry(a, &dmabuf->attachments,
						    node) {
					seq_printf(heap->s, "\t\t%s\n",
						   dev_name(a->dev));
					attach_count++;
				}
				dma_resv_unlock(dmabuf->resv);
				seq_printf(heap->s,
					   "Total %d devices attached\n\n",
					   attach_count);
			}
		}
		mutex_unlock(&heap->dmabuf_lock);
	}

	return 0;
error_unlock:
	mutex_unlock(&heap->dmabuf_lock);
	return ret;
}

static int rk_dma_heap_dump_contig(void *data)
{
	struct rk_dma_heap *heap = (struct rk_dma_heap *)data;
	struct rk_dma_heap_contig_buf *buf;
	phys_addr_t size;

	mutex_lock(&heap->contig_lock);
	list_for_each_entry(buf, &heap->contig_list, node) {
		size = buf->end - buf->start + 1;
		seq_printf(heap->s, "dma-heap:<%s> -non dmabuf\n", heap->name);
		seq_printf(heap->s, "\tAlloc by (%-20s)\t[%pa-%pa]\t%pa (%lu KiB)\n",
			   buf->orig_alloc, &buf->start, &buf->end, &size, K(size));
	}
	mutex_unlock(&heap->contig_lock);

	return 0;
}

static ssize_t rk_total_pools_kb_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	struct rk_dma_heap *heap;
	u64 total_pool_size = 0;

	mutex_lock(&rk_heap_list_lock);
	list_for_each_entry(heap, &rk_heap_list, list)
		if (heap->ops->get_pool_size)
			total_pool_size += heap->ops->get_pool_size(heap);
	mutex_unlock(&rk_heap_list_lock);

	return sysfs_emit(buf, "%llu\n", total_pool_size / 1024);
}

static struct kobj_attribute rk_total_pools_kb_attr =
	__ATTR_RO(rk_total_pools_kb);

static struct attribute *rk_dma_heap_sysfs_attrs[] = {
	&rk_total_pools_kb_attr.attr,
	NULL,
};

ATTRIBUTE_GROUPS(rk_dma_heap_sysfs);

static struct kobject *rk_dma_heap_kobject;

static int rk_dma_heap_sysfs_setup(void)
{
	int ret;

	rk_dma_heap_kobject = kobject_create_and_add("rk_dma_heap",
						     kernel_kobj);
	if (!rk_dma_heap_kobject)
		return -ENOMEM;

	ret = sysfs_create_groups(rk_dma_heap_kobject,
				  rk_dma_heap_sysfs_groups);
	if (ret) {
		kobject_put(rk_dma_heap_kobject);
		return ret;
	}

	return 0;
}

static void rk_dma_heap_sysfs_teardown(void)
{
	kobject_put(rk_dma_heap_kobject);
}

#ifdef CONFIG_DEBUG_FS

static struct dentry *rk_dma_heap_debugfs_dir;

static int rk_dma_heap_debug_show(struct seq_file *s, void *unused)
{
	struct rk_dma_heap *heap;
	unsigned long total = 0;

	mutex_lock(&rk_heap_list_lock);
	list_for_each_entry(heap, &rk_heap_list, list) {
		heap->s = s;
		get_each_dmabuf(rk_dma_heap_dump_dmabuf, heap);
		rk_dma_heap_dump_contig(heap);
		total += heap->total_size;
	}
	seq_printf(s, "\nTotal : 0x%lx (%lu KiB)\n", total, K(total));
	mutex_unlock(&rk_heap_list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(rk_dma_heap_debug);

static int rk_dma_heap_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("rk_dma_heap", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	rk_dma_heap_debugfs_dir = d;

	d = debugfs_create_file("dma_heap_info", 0444,
				rk_dma_heap_debugfs_dir, NULL,
				&rk_dma_heap_debug_fops);
	if (IS_ERR(d)) {
		dma_heap_print("rk_dma_heap: debugfs: failed to create node dma_heap_info\n");
		debugfs_remove_recursive(rk_dma_heap_debugfs_dir);
		rk_dma_heap_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}
#else
static inline int rk_dma_heap_init_debugfs(void)
{
	return 0;
}
#endif

static int rk_dma_heap_proc_show(struct seq_file *s, void *unused)
{
	struct rk_dma_heap *heap;
	unsigned long total = 0;

	mutex_lock(&rk_heap_list_lock);
	list_for_each_entry(heap, &rk_heap_list, list) {
		heap->s = s;
		get_each_dmabuf(rk_dma_heap_dump_dmabuf, heap);
		rk_dma_heap_dump_contig(heap);
		total += heap->total_size;
	}
	seq_printf(s, "\nTotal : 0x%lx (%lu KiB)\n", total, K(total));
	mutex_unlock(&rk_heap_list_lock);

	return 0;
}

static int rk_dma_heap_info_proc_open(struct inode *inode,
				      struct file *file)
{
	return single_open(file, rk_dma_heap_proc_show, NULL);
}

static const struct proc_ops rk_dma_heap_info_proc_fops = {
	.proc_open	= rk_dma_heap_info_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int rk_dma_heap_init_proc(void)
{
	proc_rk_dma_heap_dir = proc_mkdir("rk_dma_heap", NULL);
	if (!proc_rk_dma_heap_dir) {
		pr_err("create rk_dma_heap proc dir error\n");
		return -ENOENT;
	}

	proc_create("dma_heap_info", 0644, proc_rk_dma_heap_dir,
		    &rk_dma_heap_info_proc_fops);

	return 0;
}

static int rk_dma_heap_init(void)
{
	int ret;

	ret = rk_dma_heap_sysfs_setup();
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&rk_dma_heap_devt, 0, NUM_HEAP_MINORS,
				  DEVNAME);
	if (ret)
		goto err_chrdev;

	rk_dma_heap_class = class_create(THIS_MODULE, DEVNAME);
	if (IS_ERR(rk_dma_heap_class)) {
		ret = PTR_ERR(rk_dma_heap_class);
		goto err_class;
	}
	rk_dma_heap_class->devnode = rk_dma_heap_devnode;

	rk_dma_heap_init_debugfs();
	rk_dma_heap_init_proc();

	return 0;

err_class:
	unregister_chrdev_region(rk_dma_heap_devt, NUM_HEAP_MINORS);
err_chrdev:
	rk_dma_heap_sysfs_teardown();
	return ret;
}
subsys_initcall(rk_dma_heap_init);