Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5 / 5B / 5 Plus boards. The listing below is kernel/dma/coherent.c, the per-device coherent DMA memory helpers.

// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        unsigned long   pfn_base;
        int             size;
        unsigned long   *bitmap;
        spinlock_t      spinlock;
        bool            use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        if (mem->use_dev_dma_pfn_offset)
                return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
        return mem->device_base;
}

static int dma_init_coherent_memory(phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;

        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device.  This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be mapped with memremap() so the CPU can access the
 * region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                _dma_release_coherent_memory(mem);
        return ret;
}
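
/*
 * Example (illustrative sketch, not part of the original file): platform
 * code could dedicate a region to one device.  The 1 MiB region at physical
 * address 0x90000000 and the error handling below are assumptions made up
 * for the sketch.
 *
 *      ret = dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *                                        0x90000000, SZ_1M);
 *      if (ret)
 *              dev_warn(&pdev->dev, "no dedicated coherent pool\n");
 *
 * Subsequent dma_alloc_coherent(&pdev->dev, ...) calls are then satisfied
 * from this region instead of the generic allocator.
 */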

void dma_release_coherent_memory(struct device *dev)
{
        if (dev)
                _dma_release_coherent_memory(dev->dma_mem);
}

static void *__dma_alloc_from_coherent(struct device *dev,
                                       struct dma_coherent_mem *mem,
                                       ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = dma_get_device_base(dev, mem) +
                        ((dma_addr_t)pageno << PAGE_SHIFT);
        ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
        return 1;
}
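
/*
 * Example (sketch of the typical caller on the generic allocation path,
 * simplified; the variable names are illustrative):
 *
 *      void *cpu_addr;
 *
 *      if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *              return cpu_addr;        // pool handled it (may still be NULL)
 *      // otherwise fall back to the generic allocator
 */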

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                     dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
                                         dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
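
/*
 * Example (sketch of the typical caller, mirroring the allocation side;
 * simplified and illustrative):
 *
 *      if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *              return;         // buffer came from the per-device pool
 *      // otherwise release through the generic DMA ops
 */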

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                        vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
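
/*
 * Example (sketch of the typical caller on an mmap path; simplified and
 * illustrative):
 *
 *      int ret;
 *
 *      if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *              return ret;     // pool memory: remap_pfn_range() result
 *      // otherwise map through the generic dma_mmap_* helpers
 */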

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                   size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                                &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init	= rmem_dma_device_init,
        .device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
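
/*
 * Example (illustrative device tree fragment; the node names, label and
 * address/size values are made up for the sketch): a non-reusable
 * "shared-dma-pool" region becomes a per-device coherent pool for any
 * device that references it through "memory-region".
 *
 *      reserved-memory {
 *              #address-cells = <2>;
 *              #size-cells = <2>;
 *              ranges;
 *
 *              example_pool: example-pool@90000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x0 0x90000000 0x0 0x100000>;
 *                      no-map;
 *              };
 *      };
 *
 *      some_device {
 *              memory-region = <&example_pool>;
 *      };
 */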

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating the error from
         * dma_assign_coherent_memory() for a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);

        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }

        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif