Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Support for the Tundra Universe I/II VME-PCI Bridge Chips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Author: Martyn Welch <martyn.welch@ge.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Based on work by Tom Armistead and Ajit Prem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Copyright 2004 Motorola Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * Derived from ca91c042.c by Michael Wyrick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/vme.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include "../vme_bridge.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include "vme_ca91cx42.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) static void ca91cx42_remove(struct pci_dev *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
/* Module parameters */
/*
 * Geographic address override.
 * NOTE(review): presumably registered via module_param() and consumed later
 * in this file (not visible in this chunk); 0 would then mean "read the
 * geographic address from the backplane" -- confirm against the rest of
 * the file.
 */
static int geoid;

/* Name used both for the PCI driver and for request_irq() registration */
static const char driver_name[] = "vme_ca91cx42";

/* Match the Tundra Universe I/II (CA91C142) PCI-VME bridge only */
static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

MODULE_DEVICE_TABLE(pci, ca91cx42_ids);

/* PCI driver glue; probe/remove are forward-declared above */
static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	wake_up(&bridge->dma_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	return CA91CX42_LINT_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	u32 serviced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 		if (stat & CA91CX42_LINT_LM[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 			/* We only enable interrupts if the callback is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 			bridge->lm_callback[i](bridge->lm_data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 			serviced |= CA91CX42_LINT_LM[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	return serviced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) /* XXX This needs to be split into 4 queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	wake_up(&bridge->mbox_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	return CA91CX42_LINT_MBOX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	wake_up(&bridge->iack_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	return CA91CX42_LINT_SW_IACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	val = ioread32(bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	if (!(val & 0x00000800)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 			"Read Error DGCS=%08X\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	return CA91CX42_LINT_VERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	val = ioread32(bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	if (!(val & 0x00000800))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 		dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 			"Read Error DGCS=%08X\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	return CA91CX42_LINT_LERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	int stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	int vec, i, serviced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	for (i = 7; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		if (stat & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 			vec = ioread32(bridge->base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 				CA91CX42_V_STATID[i]) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 			vme_irq_handler(ca91cx42_bridge, i, vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 			serviced |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	return serviced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
/*
 * Top-level PCI interrupt handler (registered with IRQF_SHARED in
 * ca91cx42_irq_init).  Reads the local interrupt status, masks it with
 * the enabled sources, dispatches each pending source to its sub-handler,
 * then writes the serviced mask back to LINT_STAT to acknowledge.
 *
 * Returns IRQ_NONE when no enabled source is pending (the line may be
 * shared), IRQ_HANDLED otherwise.
 */
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	/* dev_id passed to request_irq() is the vme_bridge */
	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	/* Each sub-handler returns the status bits it serviced */
	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32(serviced, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
/*
 * Initialise bridge interrupt handling: quiesce and clear all interrupt
 * sources, install the shared PCI interrupt handler, route every local
 * interrupt to PCI INT0 and enable the DMA, mailbox, IACK and bus-error
 * sources.  VME IRQ levels stay disabled until ca91cx42_irq_set().
 *
 * Returns 0 on success or the negative errno from request_irq().
 */
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = to_pci_dev(ca91cx42_bridge->parent);

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* dev_id is the vme_bridge; the handler recovers it from ptr */
	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
/*
 * Tear down bridge interrupt handling: mask and clear all interrupt
 * sources, then release the PCI interrupt line.
 */
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
	struct pci_dev *pdev)
{
	struct vme_bridge *ca91cx42_bridge;

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/*
	 * NOTE(review): this container_of() looks wrong -- `bridge` is the
	 * *value* stored in vme_bridge::driver_priv (a separately allocated
	 * ca91cx42_driver), not the address of the driver_priv member, so
	 * the computed vme_bridge pointer would not match the dev_id passed
	 * to request_irq() in ca91cx42_irq_init.  Verify against probe()
	 * before relying on free_irq() succeeding here.
	 */
	ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge,
				       driver_priv);
	free_irq(pdev->irq, ca91cx42_bridge);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	tmp = ioread32(bridge->base + LINT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	if (tmp & (1 << level))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267)  * Set up an VME interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	int state, int sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	/* Enable IRQ level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	tmp = ioread32(bridge->base + LINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	if (state == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		tmp &= ~CA91CX42_LINT_VIRQ[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		tmp |= CA91CX42_LINT_VIRQ[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	iowrite32(tmp, bridge->base + LINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	if ((state == 0) && (sync != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		pdev = to_pci_dev(ca91cx42_bridge->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 		synchronize_irq(pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
/*
 * Generate a VME interrupt at the given level with the given status/ID
 * vector and wait (interruptibly) for the bus IACK cycle to complete.
 * Serialised with vme_int so only one software interrupt is in flight.
 *
 * Returns 0 on success, -EINVAL for an odd status/ID (the Universe can
 * only supply even vectors).
 */
static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	/* VINT_EN bits 25-31 drive the SW VIRQ lines for levels 1-7 */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(bridge->iack_queue,
				 ca91cx42_iack_received(bridge, level));

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	unsigned long long vme_base, unsigned long long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	dma_addr_t pci_base, u32 aspace, u32 cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	unsigned int i, addr = 0, granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	unsigned int temp_ctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	unsigned int vme_bound, pci_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	struct vme_bridge *ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	ca91cx42_bridge = image->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	i = image->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	switch (aspace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	case VME_A16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 		addr |= CA91CX42_VSI_CTL_VAS_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	case VME_A24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		addr |= CA91CX42_VSI_CTL_VAS_A24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	case VME_A32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 		addr |= CA91CX42_VSI_CTL_VAS_A32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	case VME_USER1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 		addr |= CA91CX42_VSI_CTL_VAS_USER1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	case VME_USER2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		addr |= CA91CX42_VSI_CTL_VAS_USER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	case VME_A64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	case VME_CRCSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	case VME_USER3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	case VME_USER4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	 * Bound address is a valid address for the window, adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	 * accordingly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	vme_bound = vme_base + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	pci_offset = pci_base - vme_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	if ((i == 0) || (i == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 		granularity = 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		granularity = 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	if (vme_base & (granularity - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		dev_err(ca91cx42_bridge->parent, "Invalid VME base "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 			"alignment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	if (vme_bound & (granularity - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 		dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 			"alignment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	if (pci_offset & (granularity - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 			"alignment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	/* Disable while we are mucking around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	/* Setup mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	/* Setup address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	temp_ctl |= addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	/* Setup cycle types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	if (cycle & VME_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	if (cycle & VME_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	if (cycle & VME_PROG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	if (cycle & VME_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	/* Write ctl reg without enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	if (enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		temp_ctl |= CA91CX42_VSI_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	unsigned long long *vme_base, unsigned long long *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	unsigned int i, granularity = 0, ctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	unsigned long long vme_bound, pci_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	bridge = image->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	i = image->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	if ((i == 0) || (i == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		granularity = 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		granularity = 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	/* Read Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	*pci_base = (dma_addr_t)*vme_base + pci_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	*enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	*aspace = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	*cycle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	if (ctl & CA91CX42_VSI_CTL_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		*enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		*aspace = VME_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		*aspace = VME_A24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		*aspace = VME_A32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		*aspace = VME_USER1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		*aspace = VME_USER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		*cycle |= VME_SUPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		*cycle |= VME_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		*cycle |= VME_PROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		*cycle |= VME_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497)  * Allocate and map PCI Resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) static int ca91cx42_alloc_resource(struct vme_master_resource *image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	unsigned long long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	unsigned long long existing_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	struct vme_bridge *ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	ca91cx42_bridge = image->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	/* Find pci_dev container of dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	if (!ca91cx42_bridge->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	pdev = to_pci_dev(ca91cx42_bridge->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	existing_size = (unsigned long long)(image->bus_resource.end -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		image->bus_resource.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	/* If the existing size is OK, return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	if (existing_size == (size - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	if (existing_size != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		iounmap(image->kern_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		image->kern_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		kfree(image->bus_resource.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		release_resource(&image->bus_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		memset(&image->bus_resource, 0, sizeof(image->bus_resource));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (!image->bus_resource.name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		if (!image->bus_resource.name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 			retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 			goto err_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	sprintf((char *)image->bus_resource.name, "%s.%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		ca91cx42_bridge->name, image->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	image->bus_resource.start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	image->bus_resource.end = (unsigned long)size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	image->bus_resource.flags = IORESOURCE_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	retval = pci_bus_alloc_resource(pdev->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 			"resource for window %d size 0x%lx start 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			image->number, (unsigned long)size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			(unsigned long)image->bus_resource.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		goto err_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	image->kern_base = ioremap(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		image->bus_resource.start, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	if (!image->kern_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		goto err_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) err_remap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	release_resource(&image->bus_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) err_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	kfree(image->bus_resource.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) err_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577)  * Free and unmap PCI Resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) static void ca91cx42_free_resource(struct vme_master_resource *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	iounmap(image->kern_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	image->kern_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	release_resource(&image->bus_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	kfree(image->bus_resource.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	unsigned long long vme_base, unsigned long long size, u32 aspace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	u32 cycle, u32 dwidth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	unsigned int i, granularity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	unsigned int temp_ctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	unsigned long long pci_bound, vme_offset, pci_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	struct vme_bridge *ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	ca91cx42_bridge = image->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	i = image->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if ((i == 0) || (i == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		granularity = 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		granularity = 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	/* Verify input data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	if (vme_base & (granularity - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			"alignment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		goto err_window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	if (size & (granularity - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 			"alignment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		goto err_window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	spin_lock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	 * Let's allocate the resource here rather than further up the stack as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	 * it avoids pushing loads of bus dependent stuff up the stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	retval = ca91cx42_alloc_resource(image, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 			"for resource name\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		goto err_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	pci_base = (unsigned long long)image->bus_resource.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	 * Bound address is a valid address for the window, adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	 * according to window granularity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	pci_bound = pci_base + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	vme_offset = vme_base - pci_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	/* Disable while we are mucking around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	/* Setup cycle types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (cycle & VME_BLT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	if (cycle & VME_MBLT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	/* Setup data width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	switch (dwidth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	case VME_D8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	case VME_D16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	case VME_D32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	case VME_D64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		goto err_dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	/* Setup address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	switch (aspace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	case VME_A16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	case VME_A24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	case VME_A32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	case VME_CRCSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	case VME_USER1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	case VME_USER2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	case VME_A64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	case VME_USER3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	case VME_USER4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		goto err_aspace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	if (cycle & VME_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	if (cycle & VME_PROG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	/* Setup mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	/* Write ctl reg without enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		temp_ctl |= CA91CX42_LSI_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) err_aspace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) err_dwidth:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	ca91cx42_free_resource(image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) err_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) err_window:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static int __ca91cx42_master_get(struct vme_master_resource *image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	int *enabled, unsigned long long *vme_base, unsigned long long *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	u32 *aspace, u32 *cycle, u32 *dwidth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	unsigned int i, ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	unsigned long long pci_base, pci_bound, vme_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	bridge = image->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	i = image->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	*vme_base = pci_base + vme_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	*size = (unsigned long long)(pci_bound - pci_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	*enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	*aspace = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	*cycle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	*dwidth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (ctl & CA91CX42_LSI_CTL_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		*enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	/* Setup address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	case CA91CX42_LSI_CTL_VAS_A16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		*aspace = VME_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	case CA91CX42_LSI_CTL_VAS_A24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		*aspace = VME_A24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	case CA91CX42_LSI_CTL_VAS_A32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		*aspace = VME_A32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	case CA91CX42_LSI_CTL_VAS_CRCSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		*aspace = VME_CRCSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	case CA91CX42_LSI_CTL_VAS_USER1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		*aspace = VME_USER1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	case CA91CX42_LSI_CTL_VAS_USER2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		*aspace = VME_USER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	/* XXX Not sure howto check for MBLT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	/* Setup cycle types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		*cycle |= VME_BLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		*cycle |= VME_SCT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		*cycle |= VME_SUPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		*cycle |= VME_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		*cycle = VME_PROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		*cycle = VME_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	/* Setup data width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	case CA91CX42_LSI_CTL_VDW_D8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		*dwidth = VME_D8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	case CA91CX42_LSI_CTL_VDW_D16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		*dwidth = VME_D16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	case CA91CX42_LSI_CTL_VDW_D32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		*dwidth = VME_D32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	case CA91CX42_LSI_CTL_VDW_D64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		*dwidth = VME_D64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	u32 *cycle, u32 *dwidth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	spin_lock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		cycle, dwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	void *buf, size_t count, loff_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	ssize_t retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	void __iomem *addr = image->kern_base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	unsigned int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	unsigned int count32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	spin_lock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	/* The following code handles VME address alignment. We cannot use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 * memcpy_xxx here because it may cut data transfers in to 8-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * cycles when D16 or D32 cycles are required on the VME bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * On the other hand, the bridge itself assures that the maximum data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 * cycle configured for the transfer is used and splits it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	 * automatically for non-aligned addresses, so we don't want the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	 * overhead of needlessly forcing small transfers for the entire cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if ((uintptr_t)addr & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		*(u8 *)buf = ioread8(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		done += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		if (done == count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if ((uintptr_t)(addr + done) & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		if ((count - done) < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			*(u8 *)(buf + done) = ioread8(addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			done += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			*(u16 *)(buf + done) = ioread16(addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			done += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	count32 = (count - done) & ~0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	while (done < count32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		*(u32 *)(buf + done) = ioread32(addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		done += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	if ((count - done) & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		*(u16 *)(buf + done) = ioread16(addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		done += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if ((count - done) & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		*(u8 *)(buf + done) = ioread8(addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		done += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	retval = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	void *buf, size_t count, loff_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	ssize_t retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	void __iomem *addr = image->kern_base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	unsigned int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	unsigned int count32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	spin_lock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	/* Here we apply for the same strategy we do in master_read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	 * function in order to assure the correct cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if ((uintptr_t)addr & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		iowrite8(*(u8 *)buf, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		done += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		if (done == count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if ((uintptr_t)(addr + done) & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		if ((count - done) < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			iowrite8(*(u8 *)(buf + done), addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			done += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			iowrite16(*(u16 *)(buf + done), addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			done += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	count32 = (count - done) & ~0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	while (done < count32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		iowrite32(*(u32 *)(buf + done), addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		done += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if ((count - done) & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		iowrite16(*(u16 *)(buf + done), addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		done += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if ((count - done) & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		iowrite8(*(u8 *)(buf + done), addr + done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		done += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	retval = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	unsigned int mask, unsigned int compare, unsigned int swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	loff_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	u32 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	uintptr_t pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	bridge = image->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	dev = image->parent->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/* Find the PCI address that maps to the desired VME address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	/* Locking as we can only do one of these at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	mutex_lock(&bridge->vme_rmw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/* Lock image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	spin_lock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	pci_addr = (uintptr_t)image->kern_base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	/* Address must be 4-byte aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	if (pci_addr & 0x3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		dev_err(dev, "RMW Address not 4-byte aligned\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		result = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/* Ensure RMW Disabled whilst configuring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	iowrite32(0, bridge->base + SCYC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	/* Configure registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	iowrite32(mask, bridge->base + SCYC_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	iowrite32(compare, bridge->base + SCYC_CMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	iowrite32(swap, bridge->base + SCYC_SWP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	iowrite32(pci_addr, bridge->base + SCYC_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	/* Enable RMW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/* Kick process off with a read to the required address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	result = ioread32(image->kern_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	/* Disable RMW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	iowrite32(0, bridge->base + SCYC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	spin_unlock(&image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	mutex_unlock(&bridge->vme_rmw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static int ca91cx42_dma_list_add(struct vme_dma_list *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	struct ca91cx42_dma_entry *entry, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct vme_dma_pci *pci_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	struct vme_dma_vme *vme_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	dma_addr_t desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	dev = list->parent->parent->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	/* XXX descriptor must be aligned on 64-bit boundaries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (!entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		goto err_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	/* Test descriptor alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			"required: %p\n", &entry->descriptor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		goto err_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	memset(&entry->descriptor, 0, sizeof(entry->descriptor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (dest->type == VME_DMA_VME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		vme_attr = dest->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		pci_attr = src->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		vme_attr = src->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		pci_attr = dest->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/* Check we can do fulfill required attributes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		VME_USER2)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		dev_err(dev, "Unsupported cycle type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		goto err_aspace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		VME_PROG | VME_DATA)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		dev_err(dev, "Unsupported cycle type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		goto err_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/* Check to see if we can fulfill source and destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		dev_err(dev, "Cannot perform transfer with this "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			"source-destination combination\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		goto err_direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	/* Setup cycle types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (vme_attr->cycle & VME_BLT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	/* Setup data width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	switch (vme_attr->dwidth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	case VME_D8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	case VME_D16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	case VME_D32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	case VME_D64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		dev_err(dev, "Invalid data width\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	/* Setup address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	switch (vme_attr->aspace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	case VME_A16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	case VME_A24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	case VME_A32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	case VME_USER1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	case VME_USER2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		dev_err(dev, "Invalid address space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (vme_attr->cycle & VME_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (vme_attr->cycle & VME_PROG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	entry->descriptor.dtbc = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	entry->descriptor.dla = pci_attr->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	entry->descriptor.dva = vme_attr->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	/* Add to list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	list_add_tail(&entry->list, &list->entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	/* Fill out previous descriptors "Next Address" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (entry->list.prev != &list->entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		/* We need the bus address for the pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		desc_ptr = virt_to_bus(&entry->descriptor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) err_cycle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) err_aspace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) err_direct:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) err_align:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) err_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	tmp = ioread32(bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (tmp & CA91CX42_DGCS_ACT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	struct vme_dma_resource *ctrlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	struct ca91cx42_dma_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	dma_addr_t bus_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	ctrlr = list->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	bridge = ctrlr->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	dev = ctrlr->parent->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	mutex_lock(&ctrlr->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	if (!(list_empty(&ctrlr->running))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		 * XXX We have an active DMA transfer and currently haven't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		 *     sorted out the mechanism for "pending" DMA transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		 *     Return busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		/* Need to add to pending here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		mutex_unlock(&ctrlr->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		list_add(&list->list, &ctrlr->running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	/* Get first bus address and write into registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	bus_addr = virt_to_bus(&entry->descriptor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	mutex_unlock(&ctrlr->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	iowrite32(0, bridge->base + DTBC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	/* Start the operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	val = ioread32(bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	/* XXX Could set VMEbus On and Off Counters here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		CA91CX42_DGCS_PERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	iowrite32(val, bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	val |= CA91CX42_DGCS_GO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	iowrite32(val, bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	retval = wait_event_interruptible(bridge->dma_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 					  ca91cx42_dma_busy(ctrlr->parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		val = ioread32(bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		iowrite32(val | CA91CX42_DGCS_STOP_REQ, bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		/* Wait for the operation to abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		wait_event(bridge->dma_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 			   ca91cx42_dma_busy(ctrlr->parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		retval = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	 * Read status register, this register is valid until we kick off a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	 * new transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	val = ioread32(bridge->base + DGCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		CA91CX42_DGCS_PERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		val = ioread32(bridge->base + DCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	/* Remove list from running list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	mutex_lock(&ctrlr->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	list_del(&list->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	mutex_unlock(&ctrlr->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	struct list_head *pos, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	struct ca91cx42_dma_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	/* detach and free each entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	list_for_each_safe(pos, temp, &list->entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * All 4 location monitors reside at the same base - this is therefore a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  * system wide configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  * This does not enable the LM monitor - that should be done when the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  * callback is attached and disabled when the last callback is removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static int ca91cx42_lm_set(struct vme_lm_resource *lm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	unsigned long long lm_base, u32 aspace, u32 cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	u32 temp_base, lm_ctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	bridge = lm->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	dev = lm->parent->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	/* Check the alignment of the location monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	temp_base = (u32)lm_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	if (temp_base & 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		dev_err(dev, "Location monitor must be aligned to 64KB "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			"boundary");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	mutex_lock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	/* If we already have a callback attached, we can't move it! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	for (i = 0; i < lm->monitors; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		if (bridge->lm_callback[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			dev_err(dev, "Location monitor callback attached, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 				"can't reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	switch (aspace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	case VME_A16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	case VME_A24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	case VME_A32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		dev_err(dev, "Invalid address space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (cycle & VME_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		lm_ctl |= CA91CX42_LM_CTL_SUPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	if (cycle & VME_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	if (cycle & VME_PROG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		lm_ctl |= CA91CX42_LM_CTL_PGM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (cycle & VME_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		lm_ctl |= CA91CX42_LM_CTL_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	iowrite32(lm_base, bridge->base + LM_BS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	iowrite32(lm_ctl, bridge->base + LM_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* Get configuration of the callback monitor and return whether it is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)  * or disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static int ca91cx42_lm_get(struct vme_lm_resource *lm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	u32 lm_ctl, enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	bridge = lm->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	mutex_lock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	lm_ctl = ioread32(bridge->base + LM_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	if (lm_ctl & CA91CX42_LM_CTL_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		*aspace = VME_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		*aspace = VME_A24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		*aspace = VME_A32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	*cycle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	if (lm_ctl & CA91CX42_LM_CTL_SUPR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		*cycle |= VME_SUPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		*cycle |= VME_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (lm_ctl & CA91CX42_LM_CTL_PGM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		*cycle |= VME_PROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (lm_ctl & CA91CX42_LM_CTL_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		*cycle |= VME_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	return enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * Attach a callback to a specific location monitor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * Callback will be passed the monitor triggered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	void (*callback)(void *), void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	u32 lm_ctl, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	bridge = lm->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	dev = lm->parent->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	mutex_lock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	/* Ensure that the location monitor is configured - need PGM or DATA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	lm_ctl = ioread32(bridge->base + LM_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		dev_err(dev, "Location monitor not properly configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	/* Check that a callback isn't already attached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (bridge->lm_callback[monitor]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		dev_err(dev, "Existing callback attached\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	/* Attach callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	bridge->lm_callback[monitor] = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	bridge->lm_data[monitor] = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	/* Enable Location Monitor interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	tmp = ioread32(bridge->base + LINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	tmp |= CA91CX42_LINT_LM[monitor];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	iowrite32(tmp, bridge->base + LINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	/* Ensure that global Location Monitor Enable set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		lm_ctl |= CA91CX42_LM_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		iowrite32(lm_ctl, bridge->base + LM_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * Detach a callback function forn a specific location monitor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	bridge = lm->parent->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	mutex_lock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	/* Disable Location Monitor and ensure previous interrupts are clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	tmp = ioread32(bridge->base + LINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	tmp &= ~CA91CX42_LINT_LM[monitor];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	iowrite32(tmp, bridge->base + LINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	iowrite32(CA91CX42_LINT_LM[monitor],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		 bridge->base + LINT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	/* Detach callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	bridge->lm_callback[monitor] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	bridge->lm_data[monitor] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	/* If all location monitors disabled, disable global Location Monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			CA91CX42_LINT_LM3)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		tmp = ioread32(bridge->base + LM_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		tmp &= ~CA91CX42_LM_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		iowrite32(tmp, bridge->base + LM_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	mutex_unlock(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	u32 slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (!geoid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		slot = ioread32(bridge->base + VCSR_BS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		slot = geoid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	return (int)slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	dma_addr_t *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	/* Find pci_dev container of dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	pdev = to_pci_dev(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	return pci_alloc_consistent(pdev, size, dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) static void ca91cx42_free_consistent(struct device *parent, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	void *vaddr, dma_addr_t dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	/* Find pci_dev container of dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	pdev = to_pci_dev(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	pci_free_consistent(pdev, size, vaddr, dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  * Configure CR/CSR space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  * Access to the CR/CSR can be configured at power-up. The location of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  * CR/CSR registers in the CR/CSR address space is determined by the boards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  * Auto-ID or Geographic address. This function ensures that the window is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  * enabled at an offset consistent with the boards geopgraphic address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	unsigned int crcsr_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	int tmp, slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	slot = ca91cx42_slot_get(ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	/* Write CSR Base Address if slot ID is supplied as a module param */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	if (geoid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		iowrite32(geoid << 27, bridge->base + VCSR_BS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	if (slot == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		dev_err(&pdev->dev, "Slot number is unset, not configuring "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			"CR/CSR space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	/* Allocate mem for CR/CSR image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 						     &bridge->crcsr_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	if (!bridge->crcsr_kernel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 			"image\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	crcsr_addr = slot * (512 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	tmp = ioread32(bridge->base + VCSR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	tmp |= CA91CX42_VCSR_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	iowrite32(tmp, bridge->base + VCSR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	/* Turn off CR/CSR space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	tmp = ioread32(bridge->base + VCSR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	tmp &= ~CA91CX42_VCSR_CTL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	iowrite32(tmp, bridge->base + VCSR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	/* Free image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	iowrite32(0, bridge->base + VCSR_TO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		bridge->crcsr_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	int retval, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	struct list_head *pos = NULL, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	struct vme_bridge *ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	struct ca91cx42_driver *ca91cx42_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	struct vme_master_resource *master_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	struct vme_slave_resource *slave_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	struct vme_dma_resource *dma_ctrlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	struct vme_lm_resource *lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	/* We want to support more than one of each bridge so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	 * dynamically allocate the bridge structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	if (!ca91cx42_bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		goto err_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	vme_init_bridge(ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	if (!ca91cx42_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		goto err_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	ca91cx42_bridge->driver_priv = ca91cx42_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	/* Enable the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	retval = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		dev_err(&pdev->dev, "Unable to enable device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	/* Map Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	retval = pci_request_regions(pdev, driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		dev_err(&pdev->dev, "Unable to reserve resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		goto err_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	/* map registers in BAR 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	if (!ca91cx42_device->base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		dev_err(&pdev->dev, "Unable to remap CRG region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		goto err_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	/* Check to see if the mapping worked out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	if (data != PCI_VENDOR_ID_TUNDRA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		dev_err(&pdev->dev, "PCI_ID check failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		goto err_test;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	/* Initialize wait queues & mutual exclusion flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	init_waitqueue_head(&ca91cx42_device->dma_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	init_waitqueue_head(&ca91cx42_device->iack_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	mutex_init(&ca91cx42_device->vme_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	mutex_init(&ca91cx42_device->vme_rmw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	ca91cx42_bridge->parent = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	strcpy(ca91cx42_bridge->name, driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	/* Setup IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	retval = ca91cx42_irq_init(ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (retval != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		dev_err(&pdev->dev, "Chip Initialization failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	/* Add master windows to list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		if (!master_image) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			goto err_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		master_image->parent = ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		spin_lock_init(&master_image->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		master_image->locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		master_image->number = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			VME_CRCSR | VME_USER1 | VME_USER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		memset(&master_image->bus_resource, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		       sizeof(master_image->bus_resource));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		master_image->kern_base  = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		list_add_tail(&master_image->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			&ca91cx42_bridge->master_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	/* Add slave windows to list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		if (!slave_image) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			goto err_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		slave_image->parent = ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		mutex_init(&slave_image->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		slave_image->locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		slave_image->number = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			VME_USER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		/* Only windows 0 and 4 support A16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		if (i == 0 || i == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			slave_image->address_attr |= VME_A16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		list_add_tail(&slave_image->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 			&ca91cx42_bridge->slave_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	/* Add dma engines to list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	for (i = 0; i < CA91C142_MAX_DMA; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		if (!dma_ctrlr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			goto err_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		dma_ctrlr->parent = ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		mutex_init(&dma_ctrlr->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		dma_ctrlr->locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		dma_ctrlr->number = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			VME_DMA_MEM_TO_VME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		INIT_LIST_HEAD(&dma_ctrlr->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		INIT_LIST_HEAD(&dma_ctrlr->running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		list_add_tail(&dma_ctrlr->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			&ca91cx42_bridge->dma_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	/* Add location monitor to list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	lm = kmalloc(sizeof(*lm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (!lm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		goto err_lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	lm->parent = ca91cx42_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	mutex_init(&lm->mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	lm->locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	lm->number = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	lm->monitors = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	ca91cx42_bridge->master_get = ca91cx42_master_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	ca91cx42_bridge->master_set = ca91cx42_master_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	ca91cx42_bridge->master_read = ca91cx42_master_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	ca91cx42_bridge->master_write = ca91cx42_master_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	ca91cx42_bridge->slot_get = ca91cx42_slot_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	data = ioread32(ca91cx42_device->base + MISC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	dev_info(&pdev->dev, "Slot ID is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		ca91cx42_slot_get(ca91cx42_bridge));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	/* Need to save ca91cx42_bridge pointer locally in link list for use in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	 * ca91cx42_remove()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	retval = vme_register_bridge(ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	if (retval != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		dev_err(&pdev->dev, "Chip Registration failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		goto err_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	pci_set_drvdata(pdev, ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) err_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) err_lm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		lm = list_entry(pos, struct vme_lm_resource, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		kfree(lm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) err_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		kfree(dma_ctrlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) err_slave:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		slave_image = list_entry(pos, struct vme_slave_resource, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		kfree(slave_image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) err_master:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		master_image = list_entry(pos, struct vme_master_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 			list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		kfree(master_image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	ca91cx42_irq_exit(ca91cx42_device, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) err_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) err_test:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	iounmap(ca91cx42_device->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) err_remap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) err_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) err_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	kfree(ca91cx42_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) err_driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	kfree(ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) err_struct:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static void ca91cx42_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	struct list_head *pos = NULL, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	struct vme_master_resource *master_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	struct vme_slave_resource *slave_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	struct vme_dma_resource *dma_ctrlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	struct vme_lm_resource *lm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	struct ca91cx42_driver *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	bridge = ca91cx42_bridge->driver_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	/* Turn off Ints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	iowrite32(0, bridge->base + LINT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	/* Turn off the windows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	iowrite32(0x00800000, bridge->base + LSI0_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	iowrite32(0x00800000, bridge->base + LSI1_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	iowrite32(0x00800000, bridge->base + LSI2_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	iowrite32(0x00800000, bridge->base + LSI3_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	iowrite32(0x00800000, bridge->base + LSI4_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	iowrite32(0x00800000, bridge->base + LSI5_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	iowrite32(0x00800000, bridge->base + LSI6_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	iowrite32(0x00800000, bridge->base + LSI7_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	iowrite32(0x00F00000, bridge->base + VSI7_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	vme_unregister_bridge(ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		lm = list_entry(pos, struct vme_lm_resource, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		kfree(lm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		kfree(dma_ctrlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		slave_image = list_entry(pos, struct vme_slave_resource, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		kfree(slave_image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	/* resources are stored in link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		master_image = list_entry(pos, struct vme_master_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 			list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		kfree(master_image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	ca91cx42_irq_exit(bridge, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	iounmap(bridge->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	kfree(ca91cx42_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) module_pci_driver(ca91cx42_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) MODULE_PARM_DESC(geoid, "Override geographical addressing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) module_param(geoid, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) MODULE_LICENSE("GPL");