// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 */

#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
#include <uapi/linux/dma-heap.h>

#define DEVNAME "dma_heap"

#define NUM_HEAP_MINORS 128

/**
 * struct dma_heap - represents a dmabuf heap in the system
 * @name:	used for debugging/device-node name
 * @ops:	ops struct for this heap
 * @priv:	private per-heap data passed in at registration time
 * @heap_devt:	heap device node
 * @list:	list head connecting to list of heaps
 * @heap_cdev:	heap char device
 * @refcount:	reference count for the heap
 * @heap_dev:	heap device struct
 *
 * Represents a heap of memory from which buffers can be made.
 */
struct dma_heap {
	const char *name;
	const struct dma_heap_ops *ops;
	void *priv;
	dev_t heap_devt;
	struct list_head list;
	struct cdev heap_cdev;
	struct kref refcount;
	struct device *heap_dev;
};

static LIST_HEAD(heap_list);
static DEFINE_MUTEX(heap_list_lock);
static dev_t dma_heap_devt;
static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);

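/**
 * dma_heap_find() - get the heap registered with the specified name
 * @name: name of the heap to find
 *
 * Returns:
 * The heap with a matching name, with its reference count incremented,
 * or NULL if no such heap is registered. The caller must drop the
 * reference with dma_heap_put() when done with the heap.
 */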
struct dma_heap *dma_heap_find(const char *name)
{
	struct dma_heap *h;

	mutex_lock(&heap_list_lock);
	list_for_each_entry(h, &heap_list, list) {
		if (!strcmp(h->name, name)) {
			kref_get(&h->refcount);
			mutex_unlock(&heap_list_lock);
			return h;
		}
	}
	mutex_unlock(&heap_list_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(dma_heap_find);

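/**
 * dma_heap_buffer_free() - drop a reference to a dmabuf allocated from a heap
 * @dmabuf: buffer to free
 */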
void dma_heap_buffer_free(struct dma_buf *dmabuf)
{
	dma_buf_put(dmabuf);
}
EXPORT_SYMBOL_GPL(dma_heap_buffer_free);

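/**
 * dma_heap_buffer_alloc() - allocate a dma-buf from a heap
 * @heap: heap to allocate from
 * @len: size of the allocation in bytes (rounded up to a page multiple)
 * @fd_flags: flags to use when the buffer is later turned into an fd
 * @heap_flags: heap-specific allocation flags
 *
 * Returns:
 * The allocated &struct dma_buf on success, or an ERR_PTR() on failure.
 */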
struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
				      unsigned int fd_flags,
				      unsigned int heap_flags)
{
	if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
		return ERR_PTR(-EINVAL);

	if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
		return ERR_PTR(-EINVAL);
	/*
	 * Allocations from all heaps have to begin
	 * and end on page boundaries.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return ERR_PTR(-EINVAL);

	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
}
EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);

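/**
 * dma_heap_bufferfd_alloc() - allocate a buffer from a heap and return an fd
 * @heap: heap to allocate from
 * @len: size of the allocation in bytes (rounded up to a page multiple)
 * @fd_flags: file-descriptor flags for the new fd
 * @heap_flags: heap-specific allocation flags
 *
 * Returns:
 * A new file descriptor referring to the allocated dma-buf on success,
 * or a negative errno on failure.
 */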
int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
			    unsigned int fd_flags,
			    unsigned int heap_flags)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);

	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, fd_flags);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		/* just return; the put calls release, which frees the buffer */
	}
	return fd;
}
EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);
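
/*
 * In-kernel usage sketch (illustrative only): a driver that wants to hand a
 * buffer from a named heap to userspace could look up the heap, allocate an
 * fd and then drop its heap reference. The heap name "system" and the 1 MiB
 * size are assumptions; which heaps exist depends on the kernel config.
 *
 *	struct dma_heap *heap;
 *	int fd;
 *
 *	heap = dma_heap_find("system");
 *	if (!heap)
 *		return -ENODEV;
 *
 *	fd = dma_heap_bufferfd_alloc(heap, SZ_1M, O_RDWR | O_CLOEXEC, 0);
 *	dma_heap_put(heap);
 *	if (fd < 0)
 *		return fd;
 */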

static int dma_heap_open(struct inode *inode, struct file *file)
{
	struct dma_heap *heap;

	heap = xa_load(&dma_heap_minors, iminor(inode));
	if (!heap) {
		pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
		return -ENODEV;
	}

	/* instance data as context */
	file->private_data = heap;
	nonseekable_open(inode, file);

	return 0;
}

static long dma_heap_ioctl_allocate(struct file *file, void *data)
{
	struct dma_heap_allocation_data *heap_allocation = data;
	struct dma_heap *heap = file->private_data;
	int fd;

	if (heap_allocation->fd)
		return -EINVAL;

	fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
				     heap_allocation->fd_flags,
				     heap_allocation->heap_flags);
	if (fd < 0)
		return fd;

	heap_allocation->fd = fd;

	return 0;
}

static int dma_heap_ioctl_get_phys(struct file *file, void *data)
{
#if IS_ENABLED(CONFIG_NO_GKI)
	struct dma_heap *heap = file->private_data;
	struct dma_heap_phys_data *phys = data;

	if (heap->ops->get_phys)
		return heap->ops->get_phys(heap, phys);
#endif

	return -EINVAL;
}

static unsigned int dma_heap_ioctl_cmds[] = {
	DMA_HEAP_IOCTL_ALLOC,
	DMA_HEAP_IOCTL_GET_PHYS,
};

static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
			   unsigned long arg)
{
	char stack_kdata[128];
	char *kdata = stack_kdata;
	unsigned int kcmd;
	unsigned int in_size, out_size, drv_size, ksize;
	int nr = _IOC_NR(ucmd);
	int ret = 0;

	if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
		return -EINVAL;

	nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
	/* Get the kernel ioctl cmd that matches */
	kcmd = dma_heap_ioctl_cmds[nr];

	/* Figure out the delta between user cmd size and kernel cmd size */
	drv_size = _IOC_SIZE(kcmd);
	out_size = _IOC_SIZE(ucmd);
	in_size = out_size;
	if ((ucmd & kcmd & IOC_IN) == 0)
		in_size = 0;
	if ((ucmd & kcmd & IOC_OUT) == 0)
		out_size = 0;
	ksize = max(max(in_size, out_size), drv_size);

	/* If necessary, allocate buffer for ioctl argument */
	if (ksize > sizeof(stack_kdata)) {
		kdata = kmalloc(ksize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
		ret = -EFAULT;
		goto err;
	}

	/* zero out any difference between the kernel/user structure size */
	if (ksize > in_size)
		memset(kdata + in_size, 0, ksize - in_size);

	switch (kcmd) {
	case DMA_HEAP_IOCTL_ALLOC:
		ret = dma_heap_ioctl_allocate(file, kdata);
		break;
	case DMA_HEAP_IOCTL_GET_PHYS:
		ret = dma_heap_ioctl_get_phys(file, kdata);
		break;
	default:
		ret = -ENOTTY;
		goto err;
	}

	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
		ret = -EFAULT;
err:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}

static const struct file_operations dma_heap_fops = {
	.owner = THIS_MODULE,
	.open = dma_heap_open,
	.unlocked_ioctl = dma_heap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = dma_heap_ioctl,
#endif
};
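
/*
 * Userspace usage sketch (illustrative): allocation goes through the heap's
 * device node and DMA_HEAP_IOCTL_ALLOC. The heap name below is an
 * assumption; available heaps are whatever has been registered via
 * dma_heap_add().
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *
 *	if (heap_fd >= 0 && ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) == 0)
 *		... use data.fd, which now refers to the new dma-buf ...
 */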

/**
 * dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
	return heap->priv;
}
EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);

static void dma_heap_release(struct kref *ref)
{
	struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
	int minor = MINOR(heap->heap_devt);

	/* Note: we are already holding the heap_list_lock here */
	list_del(&heap->list);

	device_destroy(dma_heap_class, heap->heap_devt);
	cdev_del(&heap->heap_cdev);
	xa_erase(&dma_heap_minors, minor);

	kfree(heap);
}

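/**
 * dma_heap_put() - drop a reference to a heap
 * @h: heap to release
 *
 * Drops a reference taken by dma_heap_find() or dma_heap_add(); once the
 * last reference is gone the heap's device node is destroyed and the heap
 * is freed.
 */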
void dma_heap_put(struct dma_heap *h)
{
	/*
	 * Take the heap_list_lock now to avoid racing with code
	 * scanning the list and then taking a kref.
	 */
	mutex_lock(&heap_list_lock);
	kref_put(&h->refcount, dma_heap_release);
	mutex_unlock(&heap_list_lock);
}
EXPORT_SYMBOL_GPL(dma_heap_put);

/**
 * dma_heap_get_dev() - get device struct for the heap
 * @heap: DMA-Heap to retrieve device struct from
 *
 * Returns:
 * The device struct for the heap.
 */
struct device *dma_heap_get_dev(struct dma_heap *heap)
{
	return heap->heap_dev;
}
EXPORT_SYMBOL_GPL(dma_heap_get_dev);

/**
 * dma_heap_get_name() - get heap name
 * @heap: DMA-Heap to retrieve the name from
 *
 * Returns:
 * The char* for the heap name.
 */
const char *dma_heap_get_name(struct dma_heap *heap)
{
	return heap->name;
}
EXPORT_SYMBOL_GPL(dma_heap_get_name);

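/**
 * dma_heap_add() - register a new dma-buf heap
 * @exp_info: information needed to register this heap
 *
 * Creates a char device node under /dev/dma_heap/<name> through which
 * userspace can allocate buffers from the heap.
 *
 * Returns:
 * The newly registered heap on success, or an ERR_PTR() on failure.
 */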
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
	struct dma_heap *heap, *err_ret;
	unsigned int minor;
	int ret;

	if (!exp_info->name || !strcmp(exp_info->name, "")) {
		pr_err("dma_heap: Cannot add heap without a name\n");
		return ERR_PTR(-EINVAL);
	}

	if (!exp_info->ops || !exp_info->ops->allocate) {
		pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
		return ERR_PTR(-EINVAL);
	}

	/* check the name is unique */
	heap = dma_heap_find(exp_info->name);
	if (heap) {
		pr_err("dma_heap: Already registered heap named %s\n",
		       exp_info->name);
		dma_heap_put(heap);
		return ERR_PTR(-EINVAL);
	}

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	kref_init(&heap->refcount);
	heap->name = exp_info->name;
	heap->ops = exp_info->ops;
	heap->priv = exp_info->priv;

	/* Find unused minor number */
	ret = xa_alloc(&dma_heap_minors, &minor, heap,
		       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
	if (ret < 0) {
		pr_err("dma_heap: Unable to get minor number for heap\n");
		err_ret = ERR_PTR(ret);
		goto err0;
	}

	/* Create device */
	heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);

	cdev_init(&heap->heap_cdev, &dma_heap_fops);
	ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
	if (ret < 0) {
		pr_err("dma_heap: Unable to add char device\n");
		err_ret = ERR_PTR(ret);
		goto err1;
	}

	heap->heap_dev = device_create(dma_heap_class,
				       NULL,
				       heap->heap_devt,
				       NULL,
				       heap->name);
	if (IS_ERR(heap->heap_dev)) {
		pr_err("dma_heap: Unable to create device\n");
		err_ret = ERR_CAST(heap->heap_dev);
		goto err2;
	}

	/* Make sure it doesn't disappear on us */
	heap->heap_dev = get_device(heap->heap_dev);

	/* Add heap to the list */
	mutex_lock(&heap_list_lock);
	list_add(&heap->list, &heap_list);
	mutex_unlock(&heap_list_lock);

	return heap;

err2:
	cdev_del(&heap->heap_cdev);
err1:
	xa_erase(&dma_heap_minors, minor);
err0:
	kfree(heap);
	return err_ret;
}
EXPORT_SYMBOL_GPL(dma_heap_add);
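
/*
 * Registration sketch (illustrative): an exporter fills in a
 * dma_heap_export_info and calls dma_heap_add(). The names my_heap_allocate
 * and my_heap_data are placeholders for the exporter's own allocate callback
 * and private data.
 *
 *	static const struct dma_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *	};
 *
 *	struct dma_heap_export_info exp_info = {
 *		.name = "my_heap",
 *		.ops = &my_heap_ops,
 *		.priv = &my_heap_data,
 *	};
 *	struct dma_heap *heap = dma_heap_add(&exp_info);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */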

static char *dma_heap_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
}

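/*
 * Sum of all heaps' page-pool sizes, exposed to userspace in KiB at
 * /sys/kernel/dma_heap/total_pools_kb. Heaps that do not implement
 * ops->get_pool_size() simply contribute nothing to the total.
 */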
static ssize_t total_pools_kb_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	struct dma_heap *heap;
	u64 total_pool_size = 0;

	mutex_lock(&heap_list_lock);
	list_for_each_entry(heap, &heap_list, list) {
		if (heap->ops->get_pool_size)
			total_pool_size += heap->ops->get_pool_size(heap);
	}
	mutex_unlock(&heap_list_lock);

	return sysfs_emit(buf, "%llu\n", total_pool_size / 1024);
}

static struct kobj_attribute total_pools_kb_attr =
	__ATTR_RO(total_pools_kb);

static struct attribute *dma_heap_sysfs_attrs[] = {
	&total_pools_kb_attr.attr,
	NULL,
};

ATTRIBUTE_GROUPS(dma_heap_sysfs);

static struct kobject *dma_heap_kobject;

static int dma_heap_sysfs_setup(void)
{
	int ret;

	dma_heap_kobject = kobject_create_and_add("dma_heap", kernel_kobj);
	if (!dma_heap_kobject)
		return -ENOMEM;

	ret = sysfs_create_groups(dma_heap_kobject, dma_heap_sysfs_groups);
	if (ret) {
		kobject_put(dma_heap_kobject);
		return ret;
	}

	return 0;
}

static void dma_heap_sysfs_teardown(void)
{
	kobject_put(dma_heap_kobject);
}

static int dma_heap_init(void)
{
	int ret;

	ret = dma_heap_sysfs_setup();
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
	if (ret)
		goto err_chrdev;

	dma_heap_class = class_create(THIS_MODULE, DEVNAME);
	if (IS_ERR(dma_heap_class)) {
		ret = PTR_ERR(dma_heap_class);
		goto err_class;
	}
	dma_heap_class->devnode = dma_heap_devnode;

	return 0;

err_class:
	unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
err_chrdev:
	dma_heap_sysfs_teardown();
	return ret;
}
subsys_initcall(dma_heap_init);