Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

pxa3xx-gcu.c, blame at commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
 *
 *  This driver needs a DirectFB counterpart in user space; communication
 *  is handled via mmap()ed memory areas and an ioctl.
 *
 *  Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 *  Copyright (c) 2009 Janine Kropp <nin@directfb.org>
 *  Copyright (c) 2009 Denis Oliver Kropp <dok@directfb.org>
 */
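
/*
 * Userspace side, as a rough sketch only (this is not part of the module;
 * apart from the device name derived from DRV_NAME, the shared structure
 * and the ioctls declared in pxa3xx-gcu.h, everything below is illustrative):
 *
 *	int fd = open("/dev/pxa3xx-gcu", O_RDWR);
 *
 *	// map the shared status/ring area handed out at offset 0
 *	struct pxa3xx_gcu_shared *shared =
 *		mmap(NULL, sizeof(*shared), PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, fd, 0);
 *
 *	uint32_t batch[16] = { 0 };		// GCU command words
 *	write(fd, batch, sizeof(batch));	// queue one batch (multiple of 4 bytes)
 *	ioctl(fd, PXA3XX_GCU_IOCTL_WAIT_IDLE);	// block until the hardware drains
 */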

/*
 * WARNING: This controller is attached to System Bus 2 of the PXA which
 * needs its arbiter to be enabled explicitly (CKENB & 1<<9).
 * There is currently no way to do this from Linux, so you need to teach
 * your bootloader for now.
 */
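
/*
 * In bootloader terms the above amounts to, roughly (a sketch only, under
 * the assumption of a memory-mapped CKENB register; the actual address and
 * accessor must come from the PXA3xx developer manual / your bootloader):
 *
 *	uint32_t ckenb = readl(CKENB);
 *	writel(ckenb | (1 << 9), CKENB);	// enable the System Bus 2 arbiter clock
 */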

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/of.h>

#include "pxa3xx-gcu.h"

#define DRV_NAME	"pxa3xx-gcu"

#define REG_GCCR	0x00
#define GCCR_SYNC_CLR	(1 << 9)
#define GCCR_BP_RST	(1 << 8)
#define GCCR_ABORT	(1 << 6)
#define GCCR_STOP	(1 << 4)

#define REG_GCISCR	0x04
#define REG_GCIECR	0x08
#define REG_GCRBBR	0x20
#define REG_GCRBLR	0x24
#define REG_GCRBHR	0x28
#define REG_GCRBTR	0x2C
#define REG_GCRBEXHR	0x30

#define IE_EOB		(1 << 0)
#define IE_EEOB		(1 << 5)
#define IE_ALL		0xff

#define SHARED_SIZE	PAGE_ALIGN(sizeof(struct pxa3xx_gcu_shared))

/* #define PXA3XX_GCU_DEBUG */
/* #define PXA3XX_GCU_DEBUG_TIMER */

#ifdef PXA3XX_GCU_DEBUG
#define QDUMP(msg)					\
	do {						\
		QPRINT(priv, KERN_DEBUG, msg);		\
	} while (0)
#else
#define QDUMP(msg)	do {} while (0)
#endif

#define QERROR(msg)					\
	do {						\
		QPRINT(priv, KERN_ERR, msg);		\
	} while (0)

struct pxa3xx_gcu_batch {
	struct pxa3xx_gcu_batch *next;
	u32			*ptr;
	dma_addr_t		 phys;
	unsigned long		 length;
};

struct pxa3xx_gcu_priv {
	struct device		 *dev;
	void __iomem		 *mmio_base;
	struct clk		 *clk;
	struct pxa3xx_gcu_shared *shared;
	dma_addr_t		  shared_phys;
	struct resource		 *resource_mem;
	struct miscdevice	  misc_dev;
	wait_queue_head_t	  wait_idle;
	wait_queue_head_t	  wait_free;
	spinlock_t		  spinlock;
	struct timespec64	  base_time;

	struct pxa3xx_gcu_batch *free;
	struct pxa3xx_gcu_batch *ready;
	struct pxa3xx_gcu_batch *ready_last;
	struct pxa3xx_gcu_batch *running;
};

static inline unsigned long
gc_readl(struct pxa3xx_gcu_priv *priv, unsigned int off)
{
	return __raw_readl(priv->mmio_base + off);
}

static inline void
gc_writel(struct pxa3xx_gcu_priv *priv, unsigned int off, unsigned long val)
{
	__raw_writel(val, priv->mmio_base + off);
}

#define QPRINT(priv, level, msg)					\
	do {								\
		struct timespec64 ts;					\
		struct pxa3xx_gcu_shared *shared = priv->shared;	\
		u32 base = gc_readl(priv, REG_GCRBBR);			\
									\
		ktime_get_ts64(&ts);					\
		ts = timespec64_sub(ts, priv->base_time);		\
									\
		printk(level "%lld.%03ld.%03ld - %-17s: %-21s (%s, "	\
			"STATUS "					\
			"0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, "	\
			"T %5ld)\n",					\
			(s64)(ts.tv_sec),				\
			ts.tv_nsec / NSEC_PER_MSEC,			\
			(ts.tv_nsec % NSEC_PER_MSEC) / USEC_PER_MSEC,	\
			__func__, msg,					\
			shared->hw_running ? "running" : "   idle",	\
			gc_readl(priv, REG_GCISCR),			\
			gc_readl(priv, REG_GCRBBR),			\
			gc_readl(priv, REG_GCRBLR),			\
			(gc_readl(priv, REG_GCRBEXHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBHR) - base) / 4,	\
			(gc_readl(priv, REG_GCRBTR) - base) / 4);	\
	} while (0)

static void
pxa3xx_gcu_reset(struct pxa3xx_gcu_priv *priv)
{
	QDUMP("RESET");

	/* disable interrupts */
	gc_writel(priv, REG_GCIECR, 0);

	/* reset hardware */
	gc_writel(priv, REG_GCCR, GCCR_ABORT);
	gc_writel(priv, REG_GCCR, 0);

	memset(priv->shared, 0, SHARED_SIZE);
	priv->shared->buffer_phys = priv->shared_phys;
	priv->shared->magic = PXA3XX_GCU_SHARED_MAGIC;

	ktime_get_ts64(&priv->base_time);

	/* set up the ring buffer pointers */
	gc_writel(priv, REG_GCRBLR, 0);
	gc_writel(priv, REG_GCRBBR, priv->shared_phys);
	gc_writel(priv, REG_GCRBTR, priv->shared_phys);

	/* enable all IRQs except EOB */
	gc_writel(priv, REG_GCIECR, IE_ALL & ~IE_EOB);
}

static void
dump_whole_state(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_shared *sh = priv->shared;
	u32 base = gc_readl(priv, REG_GCRBBR);

	QDUMP("DUMP");

	printk(KERN_DEBUG "== PXA3XX-GCU DUMP ==\n"
		"%s, STATUS 0x%02lx, B 0x%08lx [%ld], E %5ld, H %5ld, T %5ld\n",
		sh->hw_running ? "running" : "idle   ",
		gc_readl(priv, REG_GCISCR),
		gc_readl(priv, REG_GCRBBR),
		gc_readl(priv, REG_GCRBLR),
		(gc_readl(priv, REG_GCRBEXHR) - base) / 4,
		(gc_readl(priv, REG_GCRBHR) - base) / 4,
		(gc_readl(priv, REG_GCRBTR) - base) / 4);
}

static void
flush_running(struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *running = priv->running;
	struct pxa3xx_gcu_batch *next;

	while (running) {
		next = running->next;
		running->next = priv->free;
		priv->free = running;
		running = next;
	}

	priv->running = NULL;
}

static void
run_ready(struct pxa3xx_gcu_priv *priv)
{
	unsigned int num = 0;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	struct pxa3xx_gcu_batch	*ready = priv->ready;

	QDUMP("Start");

	BUG_ON(!ready);

	shared->buffer[num++] = 0x05000000;

	while (ready) {
		shared->buffer[num++] = 0x00000001;
		shared->buffer[num++] = ready->phys;
		ready = ready->next;
	}

	shared->buffer[num++] = 0x05000000;
	priv->running = priv->ready;
	priv->ready = priv->ready_last = NULL;
	gc_writel(priv, REG_GCRBLR, 0);
	shared->hw_running = 1;

	/* ring base address */
	gc_writel(priv, REG_GCRBBR, shared->buffer_phys);

	/* ring tail address */
	gc_writel(priv, REG_GCRBTR, shared->buffer_phys + num * 4);

	/* ring length */
	gc_writel(priv, REG_GCRBLR, ((num + 63) & ~63) * 4);
}

static irqreturn_t
pxa3xx_gcu_handle_irq(int irq, void *ctx)
{
	struct pxa3xx_gcu_priv *priv = ctx;
	struct pxa3xx_gcu_shared *shared = priv->shared;
	u32 status = gc_readl(priv, REG_GCISCR) & IE_ALL;

	QDUMP("-Interrupt");

	if (!status)
		return IRQ_NONE;

	spin_lock(&priv->spinlock);
	shared->num_interrupts++;

	if (status & IE_EEOB) {
		QDUMP(" [EEOB]");

		flush_running(priv);
		wake_up_all(&priv->wait_free);

		if (priv->ready) {
			run_ready(priv);
		} else {
			/* There is no more data prepared by the userspace.
			 * Set hw_running = 0 and wait for the next userspace
			 * kick-off */
			shared->num_idle++;
			shared->hw_running = 0;

			QDUMP(" '-> Idle.");

			/* set ring buffer length to zero */
			gc_writel(priv, REG_GCRBLR, 0);

			wake_up_all(&priv->wait_idle);
		}

		shared->num_done++;
	} else {
		QERROR(" [???]");
		dump_whole_state(priv);
	}

	/* Clear the interrupt */
	gc_writel(priv, REG_GCISCR, status);
	spin_unlock(&priv->spinlock);

	return IRQ_HANDLED;
}

static int
pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for idle...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_idle++;

	while (priv->shared->hw_running) {
		int num = priv->shared->num_interrupts;
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_idle,
					!priv->shared->hw_running, HZ*4);

		if (ret != 0)
			break;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
		    priv->shared->num_interrupts == num) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

static int
pxa3xx_gcu_wait_free(struct pxa3xx_gcu_priv *priv)
{
	int ret = 0;

	QDUMP("Waiting for free...");

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_wait_free++;

	while (!priv->free) {
		u32 rbexhr = gc_readl(priv, REG_GCRBEXHR);

		ret = wait_event_interruptible_timeout(priv->wait_free,
						       priv->free, HZ*4);

		if (ret < 0)
			break;

		if (ret > 0)
			continue;

		if (gc_readl(priv, REG_GCRBEXHR) == rbexhr) {
			QERROR("TIMEOUT");
			ret = -ETIMEDOUT;
			break;
		}
	}

	QDUMP("done");

	return ret;
}

/* Misc device layer */

static inline struct pxa3xx_gcu_priv *to_pxa3xx_gcu_priv(struct file *file)
{
	struct miscdevice *dev = file->private_data;
	return container_of(dev, struct pxa3xx_gcu_priv, misc_dev);
}

/*
 * provide an empty .open callback, so the core sets file->private_data
 * for us.
 */
static int pxa3xx_gcu_open(struct inode *inode, struct file *file)
{
	return 0;
}

static ssize_t
pxa3xx_gcu_write(struct file *file, const char *buff,
		 size_t count, loff_t *offp)
{
	int ret;
	unsigned long flags;
	struct pxa3xx_gcu_batch	*buffer;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	int words = count / 4;

	/* Does not need to be atomic. There's a lock in user space,
	 * but anyhow, this is just for statistics. */
	priv->shared->num_writes++;
	priv->shared->num_words += words;

	/* Last word reserved for batch buffer end command */
	if (words >= PXA3XX_GCU_BATCH_WORDS)
		return -E2BIG;

	/* Wait for a free buffer */
	if (!priv->free) {
		ret = pxa3xx_gcu_wait_free(priv);
		if (ret < 0)
			return ret;
	}

	/*
	 * Get buffer from free list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);
	buffer = priv->free;
	priv->free = buffer->next;
	spin_unlock_irqrestore(&priv->spinlock, flags);


	/* Copy data from user into buffer */
	ret = copy_from_user(buffer->ptr, buff, words * 4);
	if (ret) {
		spin_lock_irqsave(&priv->spinlock, flags);
		buffer->next = priv->free;
		priv->free = buffer;
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return -EFAULT;
	}

	buffer->length = words;

	/* Append batch buffer end command */
	buffer->ptr[words] = 0x01000000;

	/*
	 * Add buffer to ready list
	 */
	spin_lock_irqsave(&priv->spinlock, flags);

	buffer->next = NULL;

	if (priv->ready) {
		BUG_ON(priv->ready_last == NULL);

		priv->ready_last->next = buffer;
	} else
		priv->ready = buffer;

	priv->ready_last = buffer;

	if (!priv->shared->hw_running)
		run_ready(priv);

	spin_unlock_irqrestore(&priv->spinlock, flags);

	return words * 4;
}


static long
pxa3xx_gcu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (cmd) {
	case PXA3XX_GCU_IOCTL_RESET:
		spin_lock_irqsave(&priv->spinlock, flags);
		pxa3xx_gcu_reset(priv);
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return 0;

	case PXA3XX_GCU_IOCTL_WAIT_IDLE:
		return pxa3xx_gcu_wait_idle(priv);
	}

	return -ENOSYS;
}

static int
pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int size = vma->vm_end - vma->vm_start;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (vma->vm_pgoff) {
	case 0:
		/* hand out the shared data area */
		if (size != SHARED_SIZE)
			return -EINVAL;

		return dma_mmap_coherent(priv->dev, vma,
			priv->shared, priv->shared_phys, size);

	case SHARED_SIZE >> PAGE_SHIFT:
		/* hand out the MMIO base for direct register access
		 * from userspace */
		if (size != resource_size(priv->resource_mem))
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
				priv->resource_mem->start >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	}

	return -EINVAL;
}


#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;
static struct pxa3xx_gcu_priv *debug_timer_priv;

static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
	struct pxa3xx_gcu_priv *priv = debug_timer_priv;

	QERROR("Timer DUMP");

	mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}

static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
	/* init the timer structure */
	debug_timer_priv = priv;
	timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
	pxa3xx_gcu_debug_timedout(NULL);
}
#else
static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif

static int
pxa3xx_gcu_add_buffer(struct device *dev,
		      struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *buffer;

	buffer = kzalloc(sizeof(struct pxa3xx_gcu_batch), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->ptr = dma_alloc_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
					 &buffer->phys, GFP_KERNEL);
	if (!buffer->ptr) {
		kfree(buffer);
		return -ENOMEM;
	}

	buffer->next = priv->free;
	priv->free = buffer;

	return 0;
}

static void
pxa3xx_gcu_free_buffers(struct device *dev,
			struct pxa3xx_gcu_priv *priv)
{
	struct pxa3xx_gcu_batch *next, *buffer = priv->free;

	while (buffer) {
		next = buffer->next;

		dma_free_coherent(dev, PXA3XX_GCU_BATCH_WORDS * 4,
				  buffer->ptr, buffer->phys);

		kfree(buffer);
		buffer = next;
	}

	priv->free = NULL;
}

static const struct file_operations pxa3xx_gcu_miscdev_fops = {
	.owner =		THIS_MODULE,
	.open =			pxa3xx_gcu_open,
	.write =		pxa3xx_gcu_write,
	.unlocked_ioctl =	pxa3xx_gcu_ioctl,
	.mmap =			pxa3xx_gcu_mmap,
};

static int pxa3xx_gcu_probe(struct platform_device *pdev)
{
	int i, ret, irq;
	struct resource *r;
	struct pxa3xx_gcu_priv *priv;
	struct device *dev = &pdev->dev;

	priv = devm_kzalloc(dev, sizeof(struct pxa3xx_gcu_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	init_waitqueue_head(&priv->wait_idle);
	init_waitqueue_head(&priv->wait_free);
	spin_lock_init(&priv->spinlock);

	/* we allocate the misc device structure as part of our own allocation,
	 * so we can get a pointer to our priv structure later on with
	 * container_of(). This isn't really necessary as we have a fixed minor
	 * number anyway, but this is to avoid statics. */

	priv->misc_dev.minor	= PXA3XX_GCU_MINOR,
	priv->misc_dev.name	= DRV_NAME,
	priv->misc_dev.fops	= &pxa3xx_gcu_miscdev_fops;

	/* handle IO resources */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->mmio_base = devm_ioremap_resource(dev, r);
	if (IS_ERR(priv->mmio_base))
		return PTR_ERR(priv->mmio_base);

	/* enable the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	/* request the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no IRQ defined: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
			       0, DRV_NAME, priv);
	if (ret < 0) {
		dev_err(dev, "request_irq failed\n");
		return ret;
	}

	/* allocate dma memory */
	priv->shared = dma_alloc_coherent(dev, SHARED_SIZE,
					  &priv->shared_phys, GFP_KERNEL);
	if (!priv->shared) {
		dev_err(dev, "failed to allocate DMA memory\n");
		return -ENOMEM;
	}

	/* register misc device */
	ret = misc_register(&priv->misc_dev);
	if (ret < 0) {
		dev_err(dev, "misc_register() for minor %d failed\n",
			PXA3XX_GCU_MINOR);
		goto err_free_dma;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable clock\n");
		goto err_misc_deregister;
	}

	for (i = 0; i < 8; i++) {
		ret = pxa3xx_gcu_add_buffer(dev, priv);
		if (ret) {
			dev_err(dev, "failed to allocate DMA memory\n");
			goto err_disable_clk;
		}
	}

	platform_set_drvdata(pdev, priv);
	priv->resource_mem = r;
	priv->dev = dev;
	pxa3xx_gcu_reset(priv);
	pxa3xx_gcu_init_debug_timer(priv);

	dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
			(void *) r->start, (void *) priv->shared_phys,
			SHARED_SIZE, irq);
	return 0;

err_free_dma:
	dma_free_coherent(dev, SHARED_SIZE,
			priv->shared, priv->shared_phys);

err_misc_deregister:
	misc_deregister(&priv->misc_dev);

err_disable_clk:
	clk_disable_unprepare(priv->clk);

	return ret;
}

static int pxa3xx_gcu_remove(struct platform_device *pdev)
{
	struct pxa3xx_gcu_priv *priv = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	pxa3xx_gcu_wait_idle(priv);
	misc_deregister(&priv->misc_dev);
	dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
	pxa3xx_gcu_free_buffers(dev, priv);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id pxa3xx_gcu_of_match[] = {
	{ .compatible = "marvell,pxa300-gcu", },
	{ }
};
MODULE_DEVICE_TABLE(of, pxa3xx_gcu_of_match);
#endif

static struct platform_driver pxa3xx_gcu_driver = {
	.probe	  = pxa3xx_gcu_probe,
	.remove	 = pxa3xx_gcu_remove,
	.driver	 = {
		.name   = DRV_NAME,
		.of_match_table = of_match_ptr(pxa3xx_gcu_of_match),
	},
};

module_platform_driver(pxa3xx_gcu_driver);

MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(PXA3XX_GCU_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
		"Denis Oliver Kropp <dok@directfb.org>, "
		"Daniel Mack <daniel@caiaq.de>");