// SPDX-License-Identifier: GPL-2.0
/*
 * Device driver for the Cuda and Egret system controllers found on PowerMacs
 * and 68k Macs.
 *
 * The Cuda or Egret is a 6805 microcontroller interfaced to the 6522 VIA.
 * This MCU controls system power, Parameter RAM, Real Time Clock and the
 * Apple Desktop Bus (ADB) that connects to the keyboard and mouse.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
#else
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
#endif
#include <asm/io.h>
#include <linux/init.h>

static volatile unsigned char __iomem *via;
static DEFINE_SPINLOCK(cuda_lock);

/* VIA registers - spaced 0x200 bytes apart */
#define RS		0x200		/* skip between registers */
#define B		0		/* B-side data */
#define A		RS		/* A-side data */
#define DIRB		(2*RS)		/* B-side direction (1=output) */
#define DIRA		(3*RS)		/* A-side direction (1=output) */
#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
#define T2CL		(8*RS)		/* Timer 2 ctr/latch (low 8 bits) */
#define T2CH		(9*RS)		/* Timer 2 counter (high 8 bits) */
#define SR		(10*RS)		/* Shift register */
#define ACR		(11*RS)		/* Auxiliary control register */
#define PCR		(12*RS)		/* Peripheral control register */
#define IFR		(13*RS)		/* Interrupt flag register */
#define IER		(14*RS)		/* Interrupt enable register */
#define ANH		(15*RS)		/* A-side data, no handshake */

/*
 * When the Cuda design replaced the Egret, some signal names and
 * logic sense changed. They all serve the same purposes, however.
 *
 *   VIA pin        |  Egret pin
 * -----------------+------------------------------------------
 *   PB3 (input)    |  Transceiver session   (active low)
 *   PB4 (output)   |  VIA full              (active high)
 *   PB5 (output)   |  System session        (active high)
 *
 *   VIA pin        |  Cuda pin
 * -----------------+------------------------------------------
 *   PB3 (input)    |  Transfer request      (active low)
 *   PB4 (output)   |  Byte acknowledge      (active low)
 *   PB5 (output)   |  Transfer in progress  (active low)
 */
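
/*
 * Note (derived from the helpers below): the assert_*() and negate_*()
 * helpers hide this difference in logic sense.  For example, "asserting
 * TIP" clears PB5 when talking to a Cuda but sets PB5 when talking to an
 * Egret, and the Egret additionally needs a fixed delay before each edge.
 */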

/* Bits in Port B data register */
#define TREQ		0x08		/* Transfer request */
#define TACK		0x10		/* Transfer acknowledge */
#define TIP		0x20		/* Transfer in progress */

/* Bits in ACR */
#define SR_CTRL		0x1c		/* Shift register control bits */
#define SR_EXT		0x0c		/* Shift on external clock */
#define SR_OUT		0x10		/* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET		0x80		/* set bits in IER */
#define IER_CLR		0		/* clear bits in IER */
#define SR_INT		0x04		/* Shift register full/empty */

/* Duration of byte acknowledgement pulse (us) */
#define EGRET_TACK_ASSERTED_DELAY	300
#define EGRET_TACK_NEGATED_DELAY	400

/* Interval from interrupt to start of session (us) */
#define EGRET_SESSION_DELAY		450

#ifdef CONFIG_PPC
#define mcu_is_egret	false
#else
static bool mcu_is_egret;
#endif

static inline bool TREQ_asserted(u8 portb)
{
	return !(portb & TREQ);
}

static inline void assert_TIP(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_SESSION_DELAY);
		out_8(&via[B], in_8(&via[B]) | TIP);
	} else
		out_8(&via[B], in_8(&via[B]) & ~TIP);
}

static inline void assert_TIP_and_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_SESSION_DELAY);
		out_8(&via[B], in_8(&via[B]) | TIP | TACK);
	} else
		out_8(&via[B], in_8(&via[B]) & ~(TIP | TACK));
}

static inline void assert_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_TACK_NEGATED_DELAY);
		out_8(&via[B], in_8(&via[B]) | TACK);
	} else
		out_8(&via[B], in_8(&via[B]) & ~TACK);
}

static inline void toggle_TACK(void)
{
	out_8(&via[B], in_8(&via[B]) ^ TACK);
}

static inline void negate_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_TACK_ASSERTED_DELAY);
		out_8(&via[B], in_8(&via[B]) & ~TACK);
	} else
		out_8(&via[B], in_8(&via[B]) | TACK);
}

static inline void negate_TIP_and_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_TACK_ASSERTED_DELAY);
		out_8(&via[B], in_8(&via[B]) & ~(TIP | TACK));
	} else
		out_8(&via[B], in_8(&via[B]) | TIP | TACK);
}

static enum cuda_state {
	idle,
	sent_first_byte,
	sending,
	reading,
	read_done,
	awaiting_reply
} cuda_state;
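
/*
 * Transaction state machine, as driven by cuda_interrupt() below.  Roughly:
 *
 *   idle -> sent_first_byte -> sending -> awaiting_reply -> reading
 *        -> read_done -> idle
 *
 * Unsolicited packets from the MCU take the shorter path
 * idle -> reading -> read_done -> idle, and a request that expects no
 * reply goes straight from sending back to idle.
 */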

static struct adb_request *current_req;
static struct adb_request *last_req;
static unsigned char cuda_rbuf[16];
static unsigned char *reply_ptr;
static int reading_reply;
static int data_index;
static int cuda_irq;
#ifdef CONFIG_PPC
static struct device_node *vias;
#endif
static int cuda_fully_inited;

#ifdef CONFIG_ADB
static int cuda_probe(void);
static int cuda_send_request(struct adb_request *req, int sync);
static int cuda_adb_autopoll(int devs);
static int cuda_reset_adb_bus(void);
#endif /* CONFIG_ADB */

static int cuda_init_via(void);
static void cuda_start(void);
static irqreturn_t cuda_interrupt(int irq, void *arg);
static void cuda_input(unsigned char *buf, int nb);
void cuda_poll(void);
static int cuda_write(struct adb_request *req);

int cuda_request(struct adb_request *req,
		 void (*done)(struct adb_request *), int nbytes, ...);

#ifdef CONFIG_ADB
struct adb_driver via_cuda_driver = {
	.name         = "CUDA",
	.probe        = cuda_probe,
	.send_request = cuda_send_request,
	.autopoll     = cuda_adb_autopoll,
	.poll         = cuda_poll,
	.reset_bus    = cuda_reset_adb_bus,
};
#endif /* CONFIG_ADB */

#ifdef CONFIG_MAC
int __init find_via_cuda(void)
{
	struct adb_request req;
	int err;

	if (macintosh_config->adb_type != MAC_ADB_CUDA &&
	    macintosh_config->adb_type != MAC_ADB_EGRET)
		return 0;

	via = via1;
	cuda_state = idle;
	mcu_is_egret = macintosh_config->adb_type == MAC_ADB_EGRET;

	err = cuda_init_via();
	if (err) {
		printk(KERN_ERR "cuda_init_via() failed\n");
		via = NULL;
		return 0;
	}

	/* enable autopoll */
	cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
	while (!req.complete)
		cuda_poll();

	return 1;
}
#else
int __init find_via_cuda(void)
{
	struct adb_request req;
	phys_addr_t taddr;
	const u32 *reg;
	int err;

	if (vias != 0)
		return 1;
	vias = of_find_node_by_name(NULL, "via-cuda");
	if (vias == 0)
		return 0;

	reg = of_get_property(vias, "reg", NULL);
	if (reg == NULL) {
		printk(KERN_ERR "via-cuda: No \"reg\" property !\n");
		goto fail;
	}
	taddr = of_translate_address(vias, reg);
	if (taddr == 0) {
		printk(KERN_ERR "via-cuda: Can't translate address !\n");
		goto fail;
	}
	via = ioremap(taddr, 0x2000);
	if (via == NULL) {
		printk(KERN_ERR "via-cuda: Can't map address !\n");
		goto fail;
	}

	cuda_state = idle;
	sys_ctrler = SYS_CTRLER_CUDA;

	err = cuda_init_via();
	if (err) {
		printk(KERN_ERR "cuda_init_via() failed\n");
		via = NULL;
		return 0;
	}

	/* Clear and enable interrupts, but only on PPC. On 68K it's done */
	/* for us by the main VIA driver in arch/m68k/mac/via.c */

	out_8(&via[IFR], 0x7f);			/* clear interrupts by writing 1s */
	out_8(&via[IER], IER_SET|SR_INT);	/* enable interrupt from SR */

	/* enable autopoll */
	cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
	while (!req.complete)
		cuda_poll();

	return 1;

fail:
	of_node_put(vias);
	vias = NULL;
	return 0;
}
#endif /* !defined CONFIG_MAC */

static int __init via_cuda_start(void)
{
	if (via == NULL)
		return -ENODEV;

#ifdef CONFIG_MAC
	cuda_irq = IRQ_MAC_ADB;
#else
	cuda_irq = irq_of_parse_and_map(vias, 0);
	if (!cuda_irq) {
		printk(KERN_ERR "via-cuda: can't map interrupts for %pOF\n",
		       vias);
		return -ENODEV;
	}
#endif

	if (request_irq(cuda_irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
		printk(KERN_ERR "via-cuda: can't request irq %d\n", cuda_irq);
		return -EAGAIN;
	}

	pr_info("Macintosh Cuda and Egret driver.\n");

	cuda_fully_inited = 1;
	return 0;
}

device_initcall(via_cuda_start);

#ifdef CONFIG_ADB
static int
cuda_probe(void)
{
#ifdef CONFIG_PPC
	if (sys_ctrler != SYS_CTRLER_CUDA)
		return -ENODEV;
#else
	if (macintosh_config->adb_type != MAC_ADB_CUDA &&
	    macintosh_config->adb_type != MAC_ADB_EGRET)
		return -ENODEV;
#endif
	if (via == NULL)
		return -ENODEV;
	return 0;
}
#endif /* CONFIG_ADB */

static int __init sync_egret(void)
{
	if (TREQ_asserted(in_8(&via[B]))) {
		/* Complete the inbound transfer */
		assert_TIP_and_TACK();
		while (1) {
			negate_TACK();
			mdelay(1);
			(void)in_8(&via[SR]);
			assert_TACK();
			if (!TREQ_asserted(in_8(&via[B])))
				break;
		}
		negate_TIP_and_TACK();
	} else if (in_8(&via[B]) & TIP) {
		/* Terminate the outbound transfer */
		negate_TACK();
		assert_TACK();
		mdelay(1);
		negate_TIP_and_TACK();
	}
	/* Clear shift register interrupt */
	if (in_8(&via[IFR]) & SR_INT)
		(void)in_8(&via[SR]);
	return 0;
}

#define WAIT_FOR(cond, what)					\
	do {							\
		int x;						\
		for (x = 1000; !(cond); --x) {			\
			if (x == 0) {				\
				pr_err("Timeout waiting for " what "\n"); \
				return -ENXIO;			\
			}					\
			udelay(100);				\
		}						\
	} while (0)
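
/*
 * WAIT_FOR() polls its condition up to 1000 times with a 100 us delay per
 * iteration, i.e. it gives up after roughly 100 ms.  Because it contains a
 * bare "return -ENXIO", it can only be used in functions returning int,
 * such as cuda_init_via() below.
 */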

static int
__init cuda_init_via(void)
{
#ifdef CONFIG_PPC
	out_8(&via[IER], 0x7f);			/* disable interrupts from VIA */
	(void)in_8(&via[IER]);
#else
	out_8(&via[IER], SR_INT);		/* disable SR interrupt from VIA */
#endif

	out_8(&via[DIRB], (in_8(&via[DIRB]) | TACK | TIP) & ~TREQ);	/* TACK & TIP out */
	out_8(&via[ACR], (in_8(&via[ACR]) & ~SR_CTRL) | SR_EXT);	/* SR data in */
	(void)in_8(&via[SR]);			/* clear any left-over data */

	if (mcu_is_egret)
		return sync_egret();

	negate_TIP_and_TACK();

	/* delay 4ms and then clear any pending interrupt */
	mdelay(4);
	(void)in_8(&via[SR]);
	out_8(&via[IFR], SR_INT);

	/* sync with the CUDA - assert TACK without TIP */
	assert_TACK();

	/* wait for the CUDA to assert TREQ in response */
	WAIT_FOR(TREQ_asserted(in_8(&via[B])), "CUDA response to sync");

	/* wait for the interrupt and then clear it */
	WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (2)");
	(void)in_8(&via[SR]);
	out_8(&via[IFR], SR_INT);

	/* finish the sync by negating TACK */
	negate_TACK();

	/* wait for the CUDA to negate TREQ and the corresponding interrupt */
	WAIT_FOR(!TREQ_asserted(in_8(&via[B])), "CUDA response to sync (3)");
	WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (4)");
	(void)in_8(&via[SR]);
	out_8(&via[IFR], SR_INT);

	return 0;
}

#ifdef CONFIG_ADB
/* Send an ADB command */
static int
cuda_send_request(struct adb_request *req, int sync)
{
	int i;

	if ((via == NULL) || !cuda_fully_inited) {
		req->complete = 1;
		return -ENXIO;
	}

	req->reply_expected = 1;

	i = cuda_write(req);
	if (i)
		return i;

	if (sync) {
		while (!req->complete)
			cuda_poll();
	}
	return 0;
}


/* Enable/disable autopolling */
static int
cuda_adb_autopoll(int devs)
{
	struct adb_request req;

	if ((via == NULL) || !cuda_fully_inited)
		return -ENXIO;

	cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, (devs? 1: 0));
	while (!req.complete)
		cuda_poll();
	return 0;
}

/* Reset adb bus - how do we do this?? */
static int
cuda_reset_adb_bus(void)
{
	struct adb_request req;

	if ((via == NULL) || !cuda_fully_inited)
		return -ENXIO;

	cuda_request(&req, NULL, 2, ADB_PACKET, 0);	/* maybe? */
	while (!req.complete)
		cuda_poll();
	return 0;
}
#endif /* CONFIG_ADB */

/* Construct and send a cuda request */
int
cuda_request(struct adb_request *req, void (*done)(struct adb_request *),
	     int nbytes, ...)
{
	va_list list;
	int i;

	if (via == NULL) {
		req->complete = 1;
		return -ENXIO;
	}

	req->nbytes = nbytes;
	req->done = done;
	va_start(list, nbytes);
	for (i = 0; i < nbytes; ++i)
		req->data[i] = va_arg(list, int);
	va_end(list);
	req->reply_expected = 1;
	return cuda_write(req);
}
EXPORT_SYMBOL(cuda_request);
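
/*
 * Example (illustrative only): callers in this file issue a command and
 * then spin on cuda_poll() until the reply arrives, e.g.
 *
 *	struct adb_request req;
 *
 *	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
 *		return;
 *	while (!req.complete)
 *		cuda_poll();
 *
 * Passing a done() callback instead allows asynchronous completion from
 * cuda_interrupt().
 */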

static int
cuda_write(struct adb_request *req)
{
	unsigned long flags;

	if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) {
		req->complete = 1;
		return -EINVAL;
	}
	req->next = NULL;
	req->sent = 0;
	req->complete = 0;
	req->reply_len = 0;

	spin_lock_irqsave(&cuda_lock, flags);
	if (current_req != 0) {
		last_req->next = req;
		last_req = req;
	} else {
		current_req = req;
		last_req = req;
		if (cuda_state == idle)
			cuda_start();
	}
	spin_unlock_irqrestore(&cuda_lock, flags);

	return 0;
}

static void
cuda_start(void)
{
	/* assert cuda_state == idle */
	if (current_req == NULL)
		return;
	data_index = 0;
	if (TREQ_asserted(in_8(&via[B])))
		return;			/* a byte is coming in from the CUDA */

	/* set the shift register to shift out and send a byte */
	out_8(&via[ACR], in_8(&via[ACR]) | SR_OUT);
	out_8(&via[SR], current_req->data[data_index++]);
	if (mcu_is_egret)
		assert_TIP_and_TACK();
	else
		assert_TIP();
	cuda_state = sent_first_byte;
}
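
/*
 * If TREQ is already asserted when cuda_start() runs, the request simply
 * stays queued: cuda_interrupt() reads the incoming packet first and calls
 * cuda_start() again from the read_done state.
 */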

void
cuda_poll(void)
{
	cuda_interrupt(0, NULL);
}
EXPORT_SYMBOL(cuda_poll);

#define ARRAY_FULL(a, p)	((p) - (a) == ARRAY_SIZE(a))
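
/*
 * ARRAY_FULL() is true once the write pointer p has advanced one past the
 * last element of buffer a; the "reading" state below uses it to stop
 * storing bytes rather than overrun cuda_rbuf or the request reply buffer.
 */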

static irqreturn_t
cuda_interrupt(int irq, void *arg)
{
	unsigned long flags;
	u8 status;
	struct adb_request *req = NULL;
	unsigned char ibuf[16];
	int ibuf_len = 0;
	int complete = 0;
	bool full;

	spin_lock_irqsave(&cuda_lock, flags);

	/* On powermacs, this handler is registered for the VIA IRQ. But they use
	 * just the shift register IRQ -- other VIA interrupt sources are disabled.
	 * On m68k macs, the VIA IRQ sources are dispatched individually. Unless
	 * we are polling, the shift register IRQ flag has already been cleared.
	 */

#ifdef CONFIG_MAC
	if (!arg)
#endif
	{
		if ((in_8(&via[IFR]) & SR_INT) == 0) {
			spin_unlock_irqrestore(&cuda_lock, flags);
			return IRQ_NONE;
		} else {
			out_8(&via[IFR], SR_INT);
		}
	}
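
	/*
	 * arg is NULL only when we were called from cuda_poll(); the real
	 * interrupt path passes cuda_interrupt as the dev_id cookie (see
	 * request_irq() in via_cuda_start()).
	 */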

	status = in_8(&via[B]) & (TIP | TACK | TREQ);

	switch (cuda_state) {
	case idle:
		/* System controller has unsolicited data for us */
		(void)in_8(&via[SR]);
idle_state:
		assert_TIP();
		cuda_state = reading;
		reply_ptr = cuda_rbuf;
		reading_reply = 0;
		break;

	case awaiting_reply:
		/* System controller has reply data for us */
		(void)in_8(&via[SR]);
		assert_TIP();
		cuda_state = reading;
		reply_ptr = current_req->reply;
		reading_reply = 1;
		break;

	case sent_first_byte:
		if (TREQ_asserted(status)) {
			/* collision */
			out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
			(void)in_8(&via[SR]);
			negate_TIP_and_TACK();
			cuda_state = idle;
			/* Egret does not raise an "aborted" interrupt */
			if (mcu_is_egret)
				goto idle_state;
		} else {
			out_8(&via[SR], current_req->data[data_index++]);
			toggle_TACK();
			if (mcu_is_egret)
				assert_TACK();
			cuda_state = sending;
		}
		break;

	case sending:
		req = current_req;
		if (data_index >= req->nbytes) {
			out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
			(void)in_8(&via[SR]);
			negate_TIP_and_TACK();
			req->sent = 1;
			if (req->reply_expected) {
				cuda_state = awaiting_reply;
			} else {
				current_req = req->next;
				complete = 1;
				/* not sure about this */
				cuda_state = idle;
				cuda_start();
			}
		} else {
			out_8(&via[SR], req->data[data_index++]);
			toggle_TACK();
			if (mcu_is_egret)
				assert_TACK();
		}
		break;

	case reading:
		full = reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
				     : ARRAY_FULL(cuda_rbuf, reply_ptr);
		if (full)
			(void)in_8(&via[SR]);
		else
			*reply_ptr++ = in_8(&via[SR]);
		if (!TREQ_asserted(status) || full) {
			if (mcu_is_egret)
				assert_TACK();
			/* that's all folks */
			negate_TIP_and_TACK();
			cuda_state = read_done;
			/* Egret does not raise a "read done" interrupt */
			if (mcu_is_egret)
				goto read_done_state;
		} else {
			toggle_TACK();
			if (mcu_is_egret)
				negate_TACK();
		}
		break;

	case read_done:
		(void)in_8(&via[SR]);
read_done_state:
		if (reading_reply) {
			req = current_req;
			req->reply_len = reply_ptr - req->reply;
			if (req->data[0] == ADB_PACKET) {
				/* Have to adjust the reply from ADB commands */
				if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) {
					/* the 0x2 bit indicates no response */
					req->reply_len = 0;
				} else {
					/* leave just the command and result bytes in the reply */
					req->reply_len -= 2;
					memmove(req->reply, req->reply + 2, req->reply_len);
				}
			}
			current_req = req->next;
			complete = 1;
			reading_reply = 0;
		} else {
			/* This is tricky. We must break the spinlock to call
			 * cuda_input. However, doing so means we might get
			 * re-entered from another CPU getting an interrupt
			 * or calling cuda_poll(). I ended up using the stack
			 * (it's only for 16 bytes) and moving the actual
			 * call to cuda_input to outside of the lock.
			 */
			ibuf_len = reply_ptr - cuda_rbuf;
			memcpy(ibuf, cuda_rbuf, ibuf_len);
		}
		reply_ptr = cuda_rbuf;
		cuda_state = idle;
		cuda_start();
		if (cuda_state == idle && TREQ_asserted(in_8(&via[B]))) {
			assert_TIP();
			cuda_state = reading;
		}
		break;

	default:
		pr_err("cuda_interrupt: unknown cuda_state %d?\n", cuda_state);
	}
	spin_unlock_irqrestore(&cuda_lock, flags);
	if (complete && req) {
		void (*done)(struct adb_request *) = req->done;
		mb();
		req->complete = 1;
		/* Here, we assume that if the request has a done member, the
		 * struct request will survive to setting req->complete to 1
		 */
		if (done)
			(*done)(req);
	}
	if (ibuf_len)
		cuda_input(ibuf, ibuf_len);
	return IRQ_HANDLED;
}

static void
cuda_input(unsigned char *buf, int nb)
{
	switch (buf[0]) {
	case ADB_PACKET:
#ifdef CONFIG_XMON
		if (nb == 5 && buf[2] == 0x2c) {
			extern int xmon_wants_key, xmon_adb_keycode;
			if (xmon_wants_key) {
				xmon_adb_keycode = buf[3];
				return;
			}
		}
#endif /* CONFIG_XMON */
#ifdef CONFIG_ADB
		adb_input(buf+2, nb-2, buf[1] & 0x40);
#endif /* CONFIG_ADB */
		break;

	case TIMER_PACKET:
		/* Egret sends these periodically. Might be useful as a 'heartbeat'
		 * to trigger a recovery for the VIA shift register errata.
		 */
		break;

	default:
		print_hex_dump(KERN_INFO, "cuda_input: ", DUMP_PREFIX_NONE, 32, 1,
			       buf, nb, false);
	}
}

/* Offset between Unix time (1970-based) and Mac time (1904-based) */
#define RTC_OFFSET	2082844800
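
/*
 * Sanity check on the constant above (illustrative): 1904..1970 spans 66
 * years including 17 leap days, i.e. (66 * 365 + 17) * 86400 = 2082844800
 * seconds.
 */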

time64_t cuda_get_time(void)
{
	struct adb_request req;
	u32 now;

	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
		return 0;
	while (!req.complete)
		cuda_poll();
	if (req.reply_len != 7)
		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
	now = (req.reply[3] << 24) + (req.reply[4] << 16) +
	      (req.reply[5] << 8) + req.reply[6];
	return (time64_t)now - RTC_OFFSET;
}

int cuda_set_rtc_time(struct rtc_time *tm)
{
	u32 now;
	struct adb_request req;

	now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
	if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
			 now >> 24, now >> 16, now >> 8, now) < 0)
		return -ENXIO;
	while (!req.complete)
		cuda_poll();
	if ((req.reply_len != 3) && (req.reply_len != 7))
		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
	return 0;
}