Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * ioport.c:  Simple io mapping allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * 2000/01/29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * <rth> zait: as long as pci_alloc_consistent produces something addressable, 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *	things are ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *	pointer into the big page mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * <rth> zait: so what?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * <zaitcev> Hmm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *	So far so good.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * <zaitcev> Now, driver calls pci_free_consistent(with result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  *	remap_it_my_way()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * <zaitcev> How do you find the address to pass to free_pages()?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * <rth> zait: walk the page tables?  It's only two or three level after all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * <rth> zait: you have to walk them anyway to remove the mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * <zaitcev> Hmm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * <zaitcev> Sounds reasonable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <linux/pci.h>		/* struct pci_dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include <linux/dma-map-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #include <asm/vaddrs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #include <asm/oplib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #include <asm/pgalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #include <asm/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #include <asm/iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #include <asm/io-unit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #include <asm/leon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) /* This function must make sure that caches and memory are coherent after DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * On LEON systems without cache snooping it flushes the entire D-CACHE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) static inline void dma_make_coherent(unsigned long pa, unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	if (sparc_cpu_model == sparc_leon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 		if (!sparc_leon3_snooping_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 			leon_flush_dcache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)     unsigned long size, char *name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) static void _sparc_free_io(struct resource *res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) static void register_proc_sparc_ioport(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) /* This points to the next to use virtual memory for DVMA mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) static struct resource _sparc_dvma = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) /* This points to the start of I/O mappings, cluable from outside. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) /*ext*/ struct resource sparc_iomap = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83)  * Our mini-allocator...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  * Boy this is gross! We need it because we must map I/O for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  * timers and interrupt controller before the kmalloc is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
#define XNMLN  15	/* max name length stored in a static xresource */
#define XNRES  10	/* number of static slots; SS-10 uses 8 */

/* A statically-allocated resource plus its name storage, usable before
 * kmalloc is available (see the mini-allocator comment above).
 */
struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

/* The static pool handed out by xres_alloc(). */
static struct xresource xresv[XNRES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) static struct xresource *xres_alloc(void) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	struct xresource *xrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	xrp = xresv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	for (n = 0; n < XNRES; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		if (xrp->xflag == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 			xrp->xflag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 			return xrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		xrp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) static void xres_free(struct xresource *xrp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	xrp->xflag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)  * These are typically used in PCI drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)  * which are trying to be cross-platform.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)  * Bus type is always zero on IIep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) void __iomem *ioremap(phys_addr_t offset, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	char name[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	sprintf(name, "phys_%08x", (u32)offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	return _sparc_alloc_io(0, (unsigned long)offset, size, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) EXPORT_SYMBOL(ioremap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)  * Complementary to ioremap().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) void iounmap(volatile void __iomem *virtual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	 * This probably warrants some sort of hashing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		printk("free_io/iounmap: cannot free %lx\n", vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	_sparc_free_io(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 		xres_free((struct xresource *)res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		kfree(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) EXPORT_SYMBOL(iounmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) void __iomem *of_ioremap(struct resource *res, unsigned long offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 			 unsigned long size, char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	return _sparc_alloc_io(res->flags & 0xF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 			       res->start + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 			       size, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) EXPORT_SYMBOL(of_ioremap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	iounmap(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) EXPORT_SYMBOL(of_iounmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)  * Meat of mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)     unsigned long size, char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	static int printed_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	struct xresource *xres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	char *tack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	int tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	void __iomem *va;	/* P3 diag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	if (name == NULL) name = "???";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	if ((xres = xres_alloc()) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		tack = xres->xname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		res = &xres->xres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		if (!printed_full) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			printk("ioremap: done with statics, switching to malloc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 			printed_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		tlen = strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 		if (tack == NULL) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		memset(tack, 0, sizeof(struct resource));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		res = (struct resource *) tack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		tack += sizeof (struct resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	strlcpy(tack, name, XNMLN+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	res->name = tack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	va = _sparc_ioremap(res, busno, phys, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	return va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) static void __iomem *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	if (allocate_resource(&sparc_iomap, res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		/* Usually we cannot see printks in this case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 		prom_printf("alloc_io_res(%s): cannot occupy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 		    (res->name != NULL)? res->name: "???");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 		prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	pa &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	srmmu_mapiorange(bus, pa, res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	return (void __iomem *)(unsigned long)(res->start + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)  * Complementary to _sparc_ioremap().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static void _sparc_free_io(struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	unsigned long plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	plen = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	srmmu_unmapiorange(res->start, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	release_resource(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	res = kzalloc(sizeof(*res), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	res->name = dev->of_node->full_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 			      _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		printk("%s: cannot occupy 0x%zx", __func__, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		kfree(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	return res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) bool sparc_dma_free_resource(void *cpu_addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	unsigned long addr = (unsigned long)cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	res = lookup_resource(&_sparc_dvma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 		printk("%s: cannot free %p\n", __func__, cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	if ((addr & (PAGE_SIZE - 1)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 		printk("%s: unaligned va %p\n", __func__, cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	if (resource_size(res) != size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		printk("%s: region 0x%lx asked 0x%zx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 			__func__, (long)resource_size(res), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	release_resource(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	kfree(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) #ifdef CONFIG_SBUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
/* 64-bit SBUS transfers are not supported on sparc32; just say so. */
void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static int __init sparc_register_ioport(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	register_proc_sparc_ioport();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) arch_initcall(sparc_register_ioport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) #endif /* CONFIG_SBUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) /* Allocate and map kernel buffer using consistent mode DMA for a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)  * hwdev should be valid struct pci_dev pointer for PCI devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 		gfp_t gfp, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	void *va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	if (!size || size > 256 * 1024)	/* __get_free_pages() limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	if (!va) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 		printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	addr = sparc_dma_alloc_resource(dev, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		goto err_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	srmmu_mapiorange(0, virt_to_phys(va), addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	*dma_handle = virt_to_phys(va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	return (void *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) err_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	free_pages((unsigned long)va, get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) /* Free and unmap a consistent DMA buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)  * cpu_addr is what was returned arch_dma_alloc, size must be the same as what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)  * was passed into arch_dma_alloc, and likewise dma_addr must be the same as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)  * what *dma_ndler was set to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)  * References to the memory and mappings associated with cpu_addr/dma_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)  * past this call are illegal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 		dma_addr_t dma_addr, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	if (!sparc_dma_free_resource(cpu_addr, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	dma_make_coherent(dma_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	srmmu_unmapiorange((unsigned long)cpu_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) /* IIep is write-through, not flushing on cpu to device transfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 		enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	if (dir != PCI_DMA_TODEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 		dma_make_coherent(paddr, PAGE_ALIGN(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
#ifdef CONFIG_PROC_FS

/* seq_file show callback: dump every child of the resource tree passed in
 * m->private, one "start-end: name" line per entry.
 */
static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r;

	for (r = root->child; r; r = r->sibling) {
		const char *nm = r->name ? r->name : "???";

		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}
#endif /* CONFIG_PROC_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 
/* Expose the I/O and DVMA resource maps as /proc/io_map and /proc/dvma_map.
 * No-op when procfs is configured out.
 */
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_single_data("io_map", 0, NULL,
				sparc_io_proc_show, &sparc_iomap);
	proc_create_single_data("dvma_map", 0, NULL,
				sparc_io_proc_show, &_sparc_dvma);
#endif
}