Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

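The listing below is the MUSB OTG driver's support for Mentor's DMA controller, as carried in this tree.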
// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver - support for Mentor's DMA controller
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2007 by Texas Instruments
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "musb_core.h"
#include "musb_dma.h"

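/* Each DMA channel has a 16-byte block of registers starting at MUSB_HSDMA_BASE. */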
#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
		(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)

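/* Accessors for the 32-bit per-channel ADDRESS and COUNT registers. */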
#define musb_read_hsdma_addr(mbase, bchannel)	\
	musb_readl(mbase,	\
		   MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS))

#define musb_write_hsdma_addr(mbase, bchannel, addr) \
	musb_writel(mbase, \
		    MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
		    addr)

#define musb_read_hsdma_count(mbase, bchannel)	\
	musb_readl(mbase,	\
		   MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))

#define musb_write_hsdma_count(mbase, bchannel, len) \
	musb_writel(mbase, \
		    MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
		    len)

/* control register (16-bit): */
#define MUSB_HSDMA_ENABLE_SHIFT		0
#define MUSB_HSDMA_TRANSMIT_SHIFT	1
#define MUSB_HSDMA_MODE1_SHIFT		2
#define MUSB_HSDMA_IRQENABLE_SHIFT	3
#define MUSB_HSDMA_ENDPOINT_SHIFT	4
#define MUSB_HSDMA_BUSERROR_SHIFT	8
#define MUSB_HSDMA_BURSTMODE_SHIFT	9
#define MUSB_HSDMA_BURSTMODE		(3 << MUSB_HSDMA_BURSTMODE_SHIFT)
#define MUSB_HSDMA_BURSTMODE_UNSPEC	0
#define MUSB_HSDMA_BURSTMODE_INCR4	1
#define MUSB_HSDMA_BURSTMODE_INCR8	2
#define MUSB_HSDMA_BURSTMODE_INCR16	3

#define MUSB_HSDMA_CHANNELS		8

struct musb_dma_controller;

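/* Driver-private per-channel state wrapped around the generic struct dma_channel. */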
struct musb_dma_channel {
	struct dma_channel		channel;
	struct musb_dma_controller	*controller;
	u32				start_addr;
	u32				len;
	u16				max_packet_sz;
	u8				idx;
	u8				epnum;
	u8				transmit;
};

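/* Controller state: the generic dma_controller plus its MUSB_HSDMA_CHANNELS channels. */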
struct musb_dma_controller {
	struct dma_controller		controller;
	struct musb_dma_channel		channel[MUSB_HSDMA_CHANNELS];
	void				*private_data;
	void __iomem			*base;
	u8				channel_count;
	u8				used_channels;
	int				irq;
};

static void dma_channel_release(struct dma_channel *channel);

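/* Release any channels still marked in use; an active channel here is reported as an error. */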
static void dma_controller_stop(struct musb_dma_controller *controller)
{
	struct musb *musb = controller->private_data;
	struct dma_channel *channel;
	u8 bit;

	if (controller->used_channels != 0) {
		dev_err(musb->controller,
			"Stopping DMA controller while channel active\n");

		for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
			if (controller->used_channels & (1 << bit)) {
				channel = &controller->channel[bit].channel;
				dma_channel_release(channel);

				if (!controller->used_channels)
					break;
			}
		}
	}
}

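/* Claim the first free channel in the used_channels bitmap and bind it to this endpoint. */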
static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 transmit)
{
	struct musb_dma_controller *controller = container_of(c,
			struct musb_dma_controller, controller);
	struct musb_dma_channel *musb_channel = NULL;
	struct dma_channel *channel = NULL;
	u8 bit;

	for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
		if (!(controller->used_channels & (1 << bit))) {
			controller->used_channels |= (1 << bit);
			musb_channel = &(controller->channel[bit]);
			musb_channel->controller = controller;
			musb_channel->idx = bit;
			musb_channel->epnum = hw_ep->epnum;
			musb_channel->transmit = transmit;
			channel = &(musb_channel->channel);
			channel->private_data = musb_channel;
			channel->status = MUSB_DMA_STATUS_FREE;
			channel->max_len = 0x100000;
			/* Tx => mode 1; Rx => mode 0 */
			channel->desired_mode = transmit;
			channel->actual_len = 0;
			break;
		}
	}

	return channel;
}

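/* Reset the channel's bookkeeping and return it to the free pool. */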
static void dma_channel_release(struct dma_channel *channel)
{
	struct musb_dma_channel *musb_channel = channel->private_data;

	channel->actual_len = 0;
	musb_channel->start_addr = 0;
	musb_channel->len = 0;

	musb_channel->controller->used_channels &=
		~(1 << musb_channel->idx);

	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

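/* Build the CONTROL value and program ADDRESS and COUNT; writing CONTROL starts the transfer. */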
static void configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct musb_dma_channel *musb_channel = channel->private_data;
	struct musb_dma_controller *controller = musb_channel->controller;
	struct musb *musb = controller->private_data;
	void __iomem *mbase = controller->base;
	u8 bchannel = musb_channel->idx;
	u16 csr = 0;

	musb_dbg(musb, "%p, pkt_sz %d, addr %pad, len %d, mode %d",
			channel, packet_sz, &dma_addr, len, mode);

	if (mode) {
		csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
		BUG_ON(len < packet_sz);
	}
	csr |= MUSB_HSDMA_BURSTMODE_INCR16
				<< MUSB_HSDMA_BURSTMODE_SHIFT;

	csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
		| (1 << MUSB_HSDMA_ENABLE_SHIFT)
		| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
		| (musb_channel->transmit
				? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
				: 0);

	/* address/count */
	musb_write_hsdma_addr(mbase, bchannel, dma_addr);
	musb_write_hsdma_count(mbase, bchannel, len);

	/* control (this should start things) */
	musb_writew(mbase,
		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
		csr);
}

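/*
 * Record the transfer parameters, reject buffers that are not 4-byte aligned
 * on RTL 1.8+ cores (the caller falls back to PIO), then start the hardware.
 */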
static int dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct musb_dma_channel *musb_channel = channel->private_data;
	struct musb_dma_controller *controller = musb_channel->controller;
	struct musb *musb = controller->private_data;

	musb_dbg(musb, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d",
		musb_channel->epnum,
		musb_channel->transmit ? "Tx" : "Rx",
		packet_sz, &dma_addr, len, mode);

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	/*
	 * The DMA engine in RTL1.8 and above cannot handle
	 * DMA addresses that are not aligned to a 4 byte boundary.
	 * It ends up masking the last two bits of the address
	 * programmed in DMA_ADDR.
	 *
	 * Fail such DMA transfers, so that the backup PIO mode
	 * can carry out the transfer
	 */
	if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
		return false;

	channel->actual_len = 0;
	musb_channel->start_addr = dma_addr;
	musb_channel->len = len;
	musb_channel->max_packet_sz = packet_sz;
	channel->status = MUSB_DMA_STATUS_BUSY;

	configure_channel(channel, packet_sz, mode, dma_addr, len);

	return true;
}

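/*
 * Abort a busy transfer: clear the endpoint's DMA bits in TXCSR/RXCSR first,
 * then zero the channel's CONTROL, ADDRESS and COUNT registers.
 */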
static int dma_channel_abort(struct dma_channel *channel)
{
	struct musb_dma_channel *musb_channel = channel->private_data;
	void __iomem *mbase = musb_channel->controller->base;
	struct musb *musb = musb_channel->controller->private_data;

	u8 bchannel = musb_channel->idx;
	int offset;
	u16 csr;

	if (channel->status == MUSB_DMA_STATUS_BUSY) {
		if (musb_channel->transmit) {
			offset = musb->io.ep_offset(musb_channel->epnum,
						MUSB_TXCSR);

			/*
			 * The programming guide says that we must clear
			 * the DMAENAB bit before the DMAMODE bit...
			 */
			csr = musb_readw(mbase, offset);
			csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
			musb_writew(mbase, offset, csr);
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(mbase, offset, csr);
		} else {
			offset = musb->io.ep_offset(musb_channel->epnum,
						MUSB_RXCSR);

			csr = musb_readw(mbase, offset);
			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
				 MUSB_RXCSR_DMAENAB |
				 MUSB_RXCSR_DMAMODE);
			musb_writew(mbase, offset, csr);
		}

		musb_writew(mbase,
			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
			0);
		musb_write_hsdma_addr(mbase, bchannel, 0);
		musb_write_hsdma_count(mbase, bchannel, 0);
		channel->status = MUSB_DMA_STATUS_FREE;
	}

	return 0;
}

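/*
 * DMA interrupt handler: for each channel flagged in MUSB_HSDMA_INTR (or found
 * finished on a spurious interrupt), record the transferred length and report
 * completion or a bus error.
 */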
irqreturn_t dma_controller_irq(int irq, void *private_data)
{
	struct musb_dma_controller *controller = private_data;
	struct musb *musb = controller->private_data;
	struct musb_dma_channel *musb_channel;
	struct dma_channel *channel;

	void __iomem *mbase = controller->base;

	irqreturn_t retval = IRQ_NONE;

	unsigned long flags;

	u8 bchannel;
	u8 int_hsdma;

	u32 addr, count;
	u16 csr;

	spin_lock_irqsave(&musb->lock, flags);

	int_hsdma = musb_clearb(mbase, MUSB_HSDMA_INTR);

	if (!int_hsdma) {
		musb_dbg(musb, "spurious DMA irq");

		for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
			musb_channel = (struct musb_dma_channel *)
					&(controller->channel[bchannel]);
			channel = &musb_channel->channel;
			if (channel->status == MUSB_DMA_STATUS_BUSY) {
				count = musb_read_hsdma_count(mbase, bchannel);

				if (count == 0)
					int_hsdma |= (1 << bchannel);
			}
		}

		musb_dbg(musb, "int_hsdma = 0x%x", int_hsdma);

		if (!int_hsdma)
			goto done;
	}

	for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
		if (int_hsdma & (1 << bchannel)) {
			musb_channel = (struct musb_dma_channel *)
					&(controller->channel[bchannel]);
			channel = &musb_channel->channel;

			csr = musb_readw(mbase,
					MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
							MUSB_HSDMA_CONTROL));

			if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
				musb_channel->channel.status =
					MUSB_DMA_STATUS_BUS_ABORT;
			} else {
				u8 devctl;

				addr = musb_read_hsdma_addr(mbase,
						bchannel);
				channel->actual_len = addr
					- musb_channel->start_addr;

				musb_dbg(musb, "ch %p, 0x%x -> 0x%x (%zu / %d) %s",
					channel, musb_channel->start_addr,
					addr, channel->actual_len,
					musb_channel->len,
					(channel->actual_len
						< musb_channel->len) ?
					"=> reconfig 0" : "=> complete");

				devctl = musb_readb(mbase, MUSB_DEVCTL);

				channel->status = MUSB_DMA_STATUS_FREE;

				/* completed */
				if (musb_channel->transmit &&
					(!channel->desired_mode ||
					(channel->actual_len %
					    musb_channel->max_packet_sz))) {
					u8  epnum  = musb_channel->epnum;
					int offset = musb->io.ep_offset(epnum,
								    MUSB_TXCSR);
					u16 txcsr;

					/*
					 * The programming guide says that we
					 * must clear DMAENAB before DMAMODE.
					 */
					musb_ep_select(mbase, epnum);
					txcsr = musb_readw(mbase, offset);
					if (channel->desired_mode == 1) {
						txcsr &= ~(MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_AUTOSET);
						musb_writew(mbase, offset, txcsr);
						/* Send out the packet */
						txcsr &= ~MUSB_TXCSR_DMAMODE;
						txcsr |= MUSB_TXCSR_DMAENAB;
					}
					txcsr |=  MUSB_TXCSR_TXPKTRDY;
					musb_writew(mbase, offset, txcsr);
				}
				musb_dma_completion(musb, musb_channel->epnum,
						    musb_channel->transmit);
			}
		}
	}

	retval = IRQ_HANDLED;
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(dma_controller_irq);

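/* Tear down a controller: stop activity, free the DMA IRQ if one was requested, free memory. */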
void musbhs_dma_controller_destroy(struct dma_controller *c)
{
	struct musb_dma_controller *controller = container_of(c,
			struct musb_dma_controller, controller);

	dma_controller_stop(controller);

	if (controller->irq)
		free_irq(controller->irq, c);

	kfree(controller);
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_destroy);

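/* Allocation and initialization shared by the IRQ and no-IRQ creation paths. */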
static struct musb_dma_controller *
dma_controller_alloc(struct musb *musb, void __iomem *base)
{
	struct musb_dma_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->channel_count = MUSB_HSDMA_CHANNELS;
	controller->private_data = musb;
	controller->base = base;

	controller->controller.channel_alloc = dma_channel_allocate;
	controller->controller.channel_release = dma_channel_release;
	controller->controller.channel_program = dma_channel_program;
	controller->controller.channel_abort = dma_channel_abort;
	return controller;
}

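/* Create a controller driven by a dedicated "dma" platform interrupt line. */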
struct dma_controller *
musbhs_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct musb_dma_controller *controller;
	struct device *dev = musb->controller;
	struct platform_device *pdev = to_platform_device(dev);
	int irq = platform_get_irq_byname(pdev, "dma");

	if (irq <= 0) {
		dev_err(dev, "No DMA interrupt line!\n");
		return NULL;
	}

	controller = dma_controller_alloc(musb, base);
	if (!controller)
		return NULL;

	if (request_irq(irq, dma_controller_irq, 0,
			dev_name(musb->controller), controller)) {
		dev_err(dev, "request_irq %d failed!\n", irq);
		musb_dma_controller_destroy(&controller->controller);

		return NULL;
	}

	controller->irq = irq;

	return &controller->controller;
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_create);

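/*
 * Create a controller without claiming a DMA interrupt; the glue driver is
 * expected to call the exported dma_controller_irq() from its own handler.
 */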
struct dma_controller *
musbhs_dma_controller_create_noirq(struct musb *musb, void __iomem *base)
{
	struct musb_dma_controller *controller;

	controller = dma_controller_alloc(musb, base);
	if (!controller)
		return NULL;

	return &controller->controller;
}
EXPORT_SYMBOL_GPL(musbhs_dma_controller_create_noirq);