// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/zorro.h>
#include <net/ax88796.h>
#include <asm/amigaints.h>

#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100 \
		ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x64, 0)

#define XS100_IRQSTATUS_BASE 0x40
#define XS100_8390_BASE 0x800

/* Longword-access area. Translated to 2 16-bit access cycles by the
 * X-Surf 100 FPGA
 */
#define XS100_8390_DATA32_BASE 0x8000
#define XS100_8390_DATA32_SIZE 0x2000
/* Sub-Areas for fast data register access; addresses relative to area begin */
#define XS100_8390_DATA_READ32_BASE 0x0880
#define XS100_8390_DATA_WRITE32_BASE 0x0C80
#define XS100_8390_DATA_AREA_SIZE 0x80
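
/* lib8390.c names its 8390 init routine after this macro, so the include
 * below provides ax_NS8390_init(), which is what xs100_block_output() calls.
 */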
#define __NS8390_init ax_NS8390_init

/* force unsigned long back to 'void __iomem *' */
#define ax_convert_addr(_a) ((void __force __iomem *)(_a))

#define ei_inb(_a) z_readb(ax_convert_addr(_a))
#define ei_outb(_v, _a) z_writeb(_v, ax_convert_addr(_a))

#define ei_inw(_a) z_readw(ax_convert_addr(_a))
#define ei_outw(_v, _a) z_writew(_v, ax_convert_addr(_a))

#define ei_inb_p(_a) ei_inb(_a)
#define ei_outb_p(_v, _a) ei_outb(_v, _a)

/* define EI_SHIFT() to take into account our register offsets */
#define EI_SHIFT(x) (ei_local->reg_offset[(x)])

/* Ensure we have our RCR base value */
#define AX88796_PLATFORM

static unsigned char version[] =
		"ax88796.c: Copyright 2005,2007 Simtec Electronics\n";

#include "lib8390.c"

/* from ne.c */
#define NE_CMD EI_SHIFT(0x00)
#define NE_RESET EI_SHIFT(0x1f)
#define NE_DATAPORT EI_SHIFT(0x10)

struct xsurf100_ax_plat_data {
	struct ax_plat_data ax;
	void __iomem *base_regs;
	void __iomem *data_area;
};
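
/* Passed to the ax88796 core as its check_irq() hook: the interrupt line
 * (IRQ_AMIGA_PORTS) is shared, so look at the X-Surf 100 FPGA interrupt
 * status word and report whether any bit in the 0xaaaa mask is set, i.e.
 * whether this card raised the interrupt.
 */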
static int is_xsurf100_network_irq(struct platform_device *pdev)
{
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	return (readw(xs100->base_regs + XS100_IRQSTATUS_BASE) & 0xaaaa) != 0;
}

/* These functions guarantee that the iomem is accessed with 32 bit
 * cycles only; z_memcpy_fromio / z_memcpy_toio make no such guarantee.
 */
static void z_memcpy_fromio32(void *dst, const void __iomem *src, size_t bytes)
{
	while (bytes > 32) {
		asm __volatile__
		   ("movem.l (%0)+,%%d0-%%d7\n"
		    "movem.l %%d0-%%d7,(%1)\n"
		    "adda.l #32,%1" : "=a"(src), "=a"(dst)
		    : "0"(src), "1"(dst) : "d0", "d1", "d2", "d3", "d4",
		    "d5", "d6", "d7", "memory");
		bytes -= 32;
	}
	while (bytes) {
		*(uint32_t *)dst = z_readl(src);
		src += 4;
		dst += 4;
		bytes -= 4;
	}
}
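
/* 32-bit-only copy to the write window; callers ensure 'bytes' is a
 * multiple of four (whole data-area blocks or count & ~3).
 */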
static void z_memcpy_toio32(void __iomem *dst, const void *src, size_t bytes)
{
	while (bytes) {
		z_writel(*(const uint32_t *)src, dst);
		src += 4;
		dst += 4;
		bytes -= 4;
	}
}
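
/* Push a Tx buffer to the card: bulk data goes through the 32-bit FPGA
 * window in XS100_8390_DATA_AREA_SIZE chunks, and the trailing word and/or
 * byte goes through the regular NE2000 dataport.
 */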
static void xs100_write(struct net_device *dev, const void *src,
			unsigned int count)
{
	struct ei_device *ei_local = netdev_priv(dev);
	struct platform_device *pdev = to_platform_device(dev->dev.parent);
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	/* copy whole blocks */
	while (count > XS100_8390_DATA_AREA_SIZE) {
		z_memcpy_toio32(xs100->data_area +
				XS100_8390_DATA_WRITE32_BASE, src,
				XS100_8390_DATA_AREA_SIZE);
		src += XS100_8390_DATA_AREA_SIZE;
		count -= XS100_8390_DATA_AREA_SIZE;
	}
	/* copy whole dwords */
	z_memcpy_toio32(xs100->data_area + XS100_8390_DATA_WRITE32_BASE,
			src, count & ~3);
	src += count & ~3;
	if (count & 2) {
		ei_outw(*(uint16_t *)src, ei_local->mem + NE_DATAPORT);
		src += 2;
	}
	if (count & 1)
		ei_outb(*(uint8_t *)src, ei_local->mem + NE_DATAPORT);
}
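
/* Fetch an Rx buffer from the card: mirror image of xs100_write(), using
 * the 32-bit read window for the bulk and the dataport for the tail.
 */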
static void xs100_read(struct net_device *dev, void *dst, unsigned int count)
{
	struct ei_device *ei_local = netdev_priv(dev);
	struct platform_device *pdev = to_platform_device(dev->dev.parent);
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	/* copy whole blocks */
	while (count > XS100_8390_DATA_AREA_SIZE) {
		z_memcpy_fromio32(dst, xs100->data_area +
				  XS100_8390_DATA_READ32_BASE,
				  XS100_8390_DATA_AREA_SIZE);
		dst += XS100_8390_DATA_AREA_SIZE;
		count -= XS100_8390_DATA_AREA_SIZE;
	}
	/* copy whole dwords */
	z_memcpy_fromio32(dst, xs100->data_area + XS100_8390_DATA_READ32_BASE,
			  count & ~3);
	dst += count & ~3;
	if (count & 2) {
		*(uint16_t *)dst = ei_inw(ei_local->mem + NE_DATAPORT);
		dst += 2;
	}
	if (count & 1)
		*(uint8_t *)dst = ei_inb(ei_local->mem + NE_DATAPORT);
}

/* Block input and output, similar to the Crynwr packet driver. If
 * you are porting to a new ethercard, look at the packet driver
 * source for hints. The NEx000 doesn't share the on-board packet
 * memory -- you have to put the packet out through the "remote DMA"
 * dataport using ei_outb.
 */
static void xs100_block_input(struct net_device *dev, int count,
			      struct sk_buff *skb, int ring_offset)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *nic_base = ei_local->mem;
	char *buf = skb->data;

	if (ei_local->dmaing) {
		netdev_err(dev,
			   "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
			   __func__,
			   ei_local->dmaing, ei_local->irqlock);
		return;
	}

	ei_local->dmaing |= 0x01;

	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
	ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
	ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
	ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);

	xs100_read(dev, buf, count);

	ei_local->dmaing &= ~1;
}

static void xs100_block_output(struct net_device *dev, int count,
			       const unsigned char *buf, const int start_page)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *nic_base = ei_local->mem;
	unsigned long dma_start;

	/* Round the count up for word writes. Do we need to do this?
	 * What effect will an odd byte count have on the 8390? I
	 * should check someday.
	 */
	if (ei_local->word16 && (count & 0x01))
		count++;

	/* This *shouldn't* happen. If it does, it's the last thing
	 * you'll see
	 */
	if (ei_local->dmaing) {
		netdev_err(dev,
			   "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
			   __func__,
			   ei_local->dmaing, ei_local->irqlock);
		return;
	}

	ei_local->dmaing |= 0x01;
	/* We should already be in page 0, but to be safe... */
	ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, nic_base + NE_CMD);

	ei_outb(ENISR_RDC, nic_base + EN0_ISR);

	/* Now the normal output. */
	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
	ei_outb(0x00, nic_base + EN0_RSARLO);
	ei_outb(start_page, nic_base + EN0_RSARHI);

	ei_outb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);

	xs100_write(dev, buf, count);

	dma_start = jiffies;
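
	/* Wait up to 20 ms for the remote DMA complete interrupt; if it
	 * never arrives, reset the 8390 and reinitialize it.
	 */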
	while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
		if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
			ei_local->reset_8390(dev);
			ax_NS8390_init(dev, 1);
			break;
		}
	}

	ei_outb(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
	ei_local->dmaing &= ~0x01;
}
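
/* Reserve and map the X-Surf 100 control registers and the 32-bit data
 * window, then register an "ax88796" platform device that uses the
 * xs100_block_input/output helpers above for packet transfers.
 */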
static int xsurf100_probe(struct zorro_dev *zdev,
			  const struct zorro_device_id *ent)
{
	struct platform_device *pdev;
	struct xsurf100_ax_plat_data ax88796_data;
	struct resource res[2] = {
		DEFINE_RES_NAMED(IRQ_AMIGA_PORTS, 1, NULL,
				 IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE),
		DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE,
			       4 * 0x20)
	};
	int reg;
	/* This table is referenced in the device structure, so it must
	 * outlive the scope of xsurf100_probe.
	 */
	static u32 reg_offsets[32];
	int ret = 0;

	/* X-Surf 100 control and 32 bit ring buffer data access areas.
	 * These resources are not used by the ax88796 driver, so must
	 * be requested here and passed via platform data.
	 */

	if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) {
		dev_err(&zdev->dev, "cannot reserve X-Surf 100 control registers\n");
		return -ENXIO;
	}

	if (!request_mem_region(zdev->resource.start +
				XS100_8390_DATA32_BASE,
				XS100_8390_DATA32_SIZE,
				"X-Surf 100 32-bit data access")) {
		dev_err(&zdev->dev, "cannot reserve 32-bit area\n");
		ret = -ENXIO;
		goto exit_req;
	}

	for (reg = 0; reg < 0x20; reg++)
		reg_offsets[reg] = 4 * reg;

	memset(&ax88796_data, 0, sizeof(ax88796_data));
	ax88796_data.ax.flags = AXFLG_HAS_EEPROM;
	ax88796_data.ax.wordlength = 2;
	ax88796_data.ax.dcr_val = 0x48;
	ax88796_data.ax.rcr_val = 0x40;
	ax88796_data.ax.reg_offsets = reg_offsets;
	ax88796_data.ax.check_irq = is_xsurf100_network_irq;
	ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100);

	/* error handling for ioremap regs */
	if (!ax88796_data.base_regs) {
		dev_err(&zdev->dev, "Cannot ioremap area %pR (registers)\n",
			&zdev->resource);

		ret = -ENXIO;
		goto exit_req2;
	}

	ax88796_data.data_area = ioremap(zdev->resource.start +
			XS100_8390_DATA32_BASE, XS100_8390_DATA32_SIZE);

	/* error handling for ioremap data */
	if (!ax88796_data.data_area) {
		dev_err(&zdev->dev,
			"Cannot ioremap area %pR offset %x (32-bit access)\n",
			&zdev->resource, XS100_8390_DATA32_BASE);

		ret = -ENXIO;
		goto exit_mem;
	}

	ax88796_data.ax.block_output = xs100_block_output;
	ax88796_data.ax.block_input = xs100_block_input;

	pdev = platform_device_register_resndata(&zdev->dev, "ax88796",
						 zdev->slotaddr, res, 2,
						 &ax88796_data,
						 sizeof(ax88796_data));

	if (IS_ERR(pdev)) {
		dev_err(&zdev->dev, "cannot register platform device\n");
		ret = -ENXIO;
		goto exit_mem2;
	}

	zorro_set_drvdata(zdev, pdev);

	return 0;

 exit_mem2:
	iounmap(ax88796_data.data_area);

 exit_mem:
	iounmap(ax88796_data.base_regs);

 exit_req2:
	release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
			   XS100_8390_DATA32_SIZE);

 exit_req:
	release_mem_region(zdev->resource.start, 0x100);

	return ret;
}

static void xsurf100_remove(struct zorro_dev *zdev)
{
	struct platform_device *pdev = zorro_get_drvdata(zdev);
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	platform_device_unregister(pdev);

	iounmap(xs100->base_regs);
	release_mem_region(zdev->resource.start, 0x100);
	iounmap(xs100->data_area);
	release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
			   XS100_8390_DATA32_SIZE);
}

static const struct zorro_device_id xsurf100_zorro_tbl[] = {
	{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100, },
	{ 0 }
};

MODULE_DEVICE_TABLE(zorro, xsurf100_zorro_tbl);

static struct zorro_driver xsurf100_driver = {
	.name = "xsurf100",
	.id_table = xsurf100_zorro_tbl,
	.probe = xsurf100_probe,
	.remove = xsurf100_remove,
};

module_driver(xsurf100_driver, zorro_register_driver, zorro_unregister_driver);

MODULE_DESCRIPTION("X-Surf 100 driver");
MODULE_AUTHOR("Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de>");
MODULE_LICENSE("GPL v2");