Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations.  These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-map-ops.h>

static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		return vmalloc_to_page(cpu_addr);
	return virt_to_page(cpu_addr);
}

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	struct page *page = dma_common_vaddr_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
EXPORT_SYMBOL_GPL(dma_common_get_sgtable);
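
/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach dma_common_get_sgtable() through dma_get_sgtable(), which
 * dispatches to ops->get_sgtable.  The names below (example_export_buffer,
 * dev, buf, dma, size) are hypothetical and stand for a buffer previously
 * obtained with dma_alloc_coherent().
 */
static int example_export_buffer(struct device *dev, void *buf,
				 dma_addr_t dma, size_t size)
{
	struct sg_table sgt;
	int ret;

	ret = dma_get_sgtable(dev, &sgt, buf, dma, size);
	if (ret)
		return ret;

	/* ... hand sgt.sgl to another mapping layer ... */

	sg_free_table(&sgt);
	return 0;
}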

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	struct page *page = dma_common_vaddr_to_page(cpu_addr);
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}
EXPORT_SYMBOL_GPL(dma_common_mmap);
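
/*
 * Illustrative sketch (not part of the original file): a driver usually
 * lands in dma_common_mmap() by forwarding its mmap file operation to
 * dma_mmap_coherent(), which dispatches to ops->mmap.  struct example_buf
 * and its fields are hypothetical stand-ins for the driver's bookkeeping
 * around a dma_alloc_coherent() buffer.
 */
struct example_buf {
	struct device	*dev;
	void		*cpu_addr;
	dma_addr_t	dma_addr;
	size_t		size;
};

static int example_mmap(struct example_buf *buf, struct vm_area_struct *vma)
{
	return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
				 buf->dma_addr, buf->size);
}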

struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
	if (!page)
		return NULL;

	*dma_handle = ops->map_page(dev, page, 0, size, dir,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	memset(page_address(page), 0, size);
	return page;
}
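
/*
 * Illustrative sketch (not part of the original file): drivers reach
 * dma_common_alloc_pages() through dma_alloc_pages(), which dispatches to
 * ops->alloc_pages when the device uses a dma_map_ops instance.  The
 * function name and buffer size below are hypothetical.
 */
static int example_streaming_buffer(struct device *dev)
{
	struct page *page;
	dma_addr_t dma;

	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_TO_DEVICE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* ... fill page_address(page) and point the device at dma ... */

	dma_free_pages(dev, SZ_64K, page, dma, DMA_TO_DEVICE);
	return 0;
}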

void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->unmap_page)
		ops->unmap_page(dev, dma_handle, size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	dma_free_contiguous(dev, page, size);
}
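
/*
 * Illustrative sketch (not part of the original file): these helpers are
 * written so an architecture or IOMMU implementation can plug them straight
 * into its dma_map_ops.  The ops table below is hypothetical; a real
 * implementation would also provide .map_page/.unmap_page (which
 * dma_common_alloc_pages() dereferences directly), .map_sg/.unmap_sg and
 * whatever cache-maintenance callbacks its platform needs.
 */
static const struct dma_map_ops example_dma_ops = {
	.get_sgtable	= dma_common_get_sgtable,
	.mmap		= dma_common_mmap,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
};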