Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/x86/include/asm/dma.h (blame head: commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 */

#ifndef _ASM_X86_DMA_H
#define _ASM_X86_DMA_H

#include <linux/spinlock.h>	/* And spinlocks */
#include <asm/io.h>		/* need byte IO */

#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb	outb_p
#else
#define dma_outb	outb
#endif

#define dma_inb		inb

/*
 * NOTES about DMA transfers:
 *
 *  controller 1: channels 0-3, byte operations, ports 00-1F
 *  controller 2: channels 4-7, word operations, ports C0-DF
 *
 *  - ALL registers are 8 bits only, regardless of transfer size
 *  - channel 4 is not used - cascades 1 into 2.
 *  - channels 0-3 are byte - addresses/counts are for physical bytes
 *  - channels 5-7 are word - addresses/counts are for physical words
 *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
 *  - transfer count loaded to registers is 1 less than actual count
 *  - controller 2 offsets are all even (2x offsets for controller 1)
 *  - page registers for 5-7 don't use data bit 0, represent 128K pages
 *  - page registers for 0-3 use bit 0, represent 64K pages
 *
 * DMA transfers are limited to the lower 16MB of _physical_ memory.
 * Note that addresses loaded into registers must be _physical_ addresses,
 * not logical addresses (which may differ if paging is active).
 *
 *  Address mapping for channels 0-3:
 *
 *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *   P7  ...  P0  A7 ... A0  A7 ... A0
 * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
 *
 *  Address mapping for channels 5-7:
 *
 *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
 *    |  ...  |   \   \   ... \  \  \  ... \  \
 *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
 *    |  ...  |     \   \   ... \  \  \  ... \
 *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
 * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
 *
 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
 * the hardware level, so odd-byte transfers aren't possible).
 *
 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
 * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
 *
 */
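
/*
 * Worked example (editor's illustration, not part of the upstream header):
 * a buffer at physical address 0x123456.
 *
 *   8-bit channel (0-3):  page = 0x12, addr MSB = 0x34, addr LSB = 0x56;
 *                         a 4096-byte transfer loads a count of
 *                         4096 - 1 = 0x0FFF.
 *   16-bit channel (5-7): the registers hold word addresses, so the
 *                         address is programmed as 0x123456 >> 1 = 0x091A2B
 *                         (page 0x12 with bit 0 masked off, MSB 0x1A,
 *                         LSB 0x2B), and 4096 bytes load a count of
 *                         4096/2 - 1 = 0x07FF.
 *
 * set_dma_addr() and set_dma_count() below perform exactly this split.
 */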

#define MAX_DMA_CHANNELS	8

/* 16MB ISA DMA zone */
#define MAX_DMA_PFN   ((16UL * 1024 * 1024) >> PAGE_SHIFT)

/* 4GB broken PCI/AGP hardware bus master zone */
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))

#ifdef CONFIG_X86_32
/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS      (PAGE_OFFSET + 0x1000000)
#else
/* Compat define for old dma zone */
#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
#endif

/* 8237 DMA controllers */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09    /* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D    /* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2    /* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA    /* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */

#define DMA_ADDR_0		0x00    /* DMA address registers */
#define DMA_ADDR_1		0x02
#define DMA_ADDR_2		0x04
#define DMA_ADDR_3		0x06
#define DMA_ADDR_4		0xC0
#define DMA_ADDR_5		0xC4
#define DMA_ADDR_6		0xC8
#define DMA_ADDR_7		0xCC

#define DMA_CNT_0		0x01    /* DMA count registers */
#define DMA_CNT_1		0x03
#define DMA_CNT_2		0x05
#define DMA_CNT_3		0x07
#define DMA_CNT_4		0xC2
#define DMA_CNT_5		0xC6
#define DMA_CNT_6		0xCA
#define DMA_CNT_7		0xCE

#define DMA_PAGE_0		0x87    /* DMA page registers */
#define DMA_PAGE_1		0x83
#define DMA_PAGE_2		0x81
#define DMA_PAGE_3		0x82
#define DMA_PAGE_5		0x8B
#define DMA_PAGE_6		0x89
#define DMA_PAGE_7		0x8A

/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_READ		0x44
/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_WRITE		0x48
/* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_MODE_CASCADE	0xC0

#define DMA_AUTOINIT		0x10


#ifdef CONFIG_ISA_DMA_API
extern spinlock_t  dma_spin_lock;

static inline unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static inline void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
#endif /* CONFIG_ISA_DMA_API */

/* enable/disable a specific DMA channel */
static inline void enable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr, DMA1_MASK_REG);
	else
		dma_outb(dmanr & 3, DMA2_MASK_REG);
}

static inline void disable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while holding the DMA lock ! ---
 */
static inline void clear_dma_ff(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(0, DMA1_CLEAR_FF_REG);
	else
		dma_outb(0, DMA2_CLEAR_FF_REG);
}

/* set mode (above) for a specific DMA channel */
static inline void set_dma_mode(unsigned int dmanr, char mode)
{
	if (dmanr <= 3)
		dma_outb(mode | dmanr, DMA1_MODE_REG);
	else
		dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}

/* Set only the page register bits of the transfer address.
 * This is used for successive transfers when we know the contents of
 * the lower 16 bits of the DMA current address register, but a 64k boundary
 * may have been crossed.
 */
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
	switch (dmanr) {
	case 0:
		dma_outb(pagenr, DMA_PAGE_0);
		break;
	case 1:
		dma_outb(pagenr, DMA_PAGE_1);
		break;
	case 2:
		dma_outb(pagenr, DMA_PAGE_2);
		break;
	case 3:
		dma_outb(pagenr, DMA_PAGE_3);
		break;
	case 5:
		dma_outb(pagenr & 0xfe, DMA_PAGE_5);
		break;
	case 6:
		dma_outb(pagenr & 0xfe, DMA_PAGE_6);
		break;
	case 7:
		dma_outb(pagenr & 0xfe, DMA_PAGE_7);
		break;
	}
}


/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	set_dma_page(dmanr, a>>16);
	if (dmanr <= 3)  {
		dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
		dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
	}  else  {
		dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
		dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
	}
}


/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3)  {
		dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
		dma_outb((count >> 8) & 0xff,
			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
	} else {
		dma_outb((count >> 1) & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
		dma_outb((count >> 9) & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
	}
}


/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static inline int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port;
	/* using short to get 16-bit wrap around */
	unsigned short count;

	io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
		: ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr <= 3) ? count : (count << 1);
}
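
/*
 * Illustrative sketch (editor's note, not in the upstream header):
 * get_dma_residue() is normally called with the channel masked, the
 * flip-flop cleared and the DMA lock held, e.g.:
 *
 *	flags = claim_dma_lock();
 *	clear_dma_ff(chan);
 *	residue = get_dma_residue(chan);
 *	release_dma_lock(flags);
 *
 * "chan", "flags" and "residue" are hypothetical local variables.
 */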


/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
#ifdef CONFIG_ISA_DMA_API
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
#endif
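
/*
 * Usage sketch (editor's illustration, not part of the upstream header):
 * the usual way the helpers above are combined to program one channel,
 * as described in the flip-flop and lock comments earlier in this file.
 * Assumes "buf_phys" is the physical address of a buffer below 16MB
 * (e.g. from a GFP_DMA allocation); the function name, parameters and
 * device string are hypothetical.
 */
#ifdef CONFIG_ISA_DMA_API
static inline int example_setup_isa_dma(unsigned int chan,
					unsigned int buf_phys,
					unsigned int len)
{
	unsigned long flags;
	int err;

	err = request_dma(chan, "example-device");	/* reserve the channel */
	if (err)
		return err;

	flags = claim_dma_lock();	/* helpers below need the DMA lock */
	disable_dma(chan);		/* mask the channel while reprogramming */
	clear_dma_ff(chan);		/* put the flip-flop in a known state */
	set_dma_mode(chan, DMA_MODE_WRITE);	/* memory -> device */
	set_dma_addr(chan, buf_phys);	/* also programs the page register */
	set_dma_count(chan, len);	/* bytes; must be even for channels 5-7 */
	enable_dma(chan);		/* unmask: the device may now issue DREQ */
	release_dma_lock(flags);

	/* ... start the device; once the transfer has completed: */

	flags = claim_dma_lock();
	disable_dma(chan);
	release_dma_lock(flags);
	free_dma(chan);
	return 0;
}
#endif /* CONFIG_ISA_DMA_API */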

/* From PCI */

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* _ASM_X86_DMA_H */