^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2010 - Maxim Levitsky
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * driver for Ricoh memstick readers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/swab.h>
#include <asm/byteorder.h>
#include "r592.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
/* Use DMA when the hardware advertises support (presumably bound via
 * module_param() further down — confirm); 'true' instead of the int
 * literal 1 for a bool. */
static bool r592_enable_dma = true;
/* Debug verbosity: 0 = off; presumably consumed by the dbg* macros in
 * r592.h — confirm. */
static int debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
/*
 * Printable names for TPC (transport protocol command) codes, indexed
 * by (tpc - 1) — see memstick_debug_get_tpc_name() below.  The
 * "INVALID" slot covers the one code in the range with no defined TPC.
 */
static const char *tpc_names[] = {
	"MS_TPC_READ_MG_STATUS",
	"MS_TPC_READ_LONG_DATA",
	"MS_TPC_READ_SHORT_DATA",
	"MS_TPC_READ_REG",
	"MS_TPC_READ_QUAD_DATA",
	"INVALID",
	"MS_TPC_GET_INT",
	"MS_TPC_SET_RW_REG_ADRS",
	"MS_TPC_EX_SET_CMD",
	"MS_TPC_WRITE_QUAD_DATA",
	"MS_TPC_WRITE_REG",
	"MS_TPC_WRITE_SHORT_DATA",
	"MS_TPC_WRITE_LONG_DATA",
	"MS_TPC_SET_CMD",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * memstick_debug_get_tpc_name - debug helper that returns string for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * a TPC number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) const char *memstick_debug_get_tpc_name(int tpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) return tpc_names[tpc-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) EXPORT_SYMBOL(memstick_debug_get_tpc_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
/* Read a 32-bit (CPU-endian) register and trace it via dbg_reg */
static inline u32 r592_read_reg(struct r592_device *dev, int address)
{
	u32 value = readl(dev->mmio + address);
	dbg_reg("reg #%02d == 0x%08x", address, value);
	return value;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
/* Write a 32-bit (CPU-endian) register, tracing the value via dbg_reg */
static inline void r592_write_reg(struct r592_device *dev,
			int address, u32 value)
{
	dbg_reg("reg #%02d <- 0x%08x", address, value);
	writel(value, dev->mmio + address);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
/* Reads a big endian DWORD register; the dbg_reg trace shows the raw
 * (pre-byteswap) value, the caller gets the CPU-endian result */
static inline u32 r592_read_reg_raw_be(struct r592_device *dev, int address)
{
	u32 value = __raw_readl(dev->mmio + address);
	dbg_reg("reg #%02d == 0x%08x", address, value);
	return be32_to_cpu(value);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
/* Writes a big endian DWORD register; the dbg_reg trace shows the
 * CPU-endian value as passed in, the device sees it byteswapped */
static inline void r592_write_reg_raw_be(struct r592_device *dev,
			int address, u32 value)
{
	dbg_reg("reg #%02d <- 0x%08x", address, value);
	__raw_writel(cpu_to_be32(value), dev->mmio + address);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) /* Set specific bits in a register (little endian) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) static inline void r592_set_reg_mask(struct r592_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) int address, u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) u32 reg = readl(dev->mmio + address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) dbg_reg("reg #%02d |= 0x%08x (old =0x%08x)", address, mask, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) writel(reg | mask , dev->mmio + address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /* Clear specific bits in a register (little endian) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) static inline void r592_clear_reg_mask(struct r592_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int address, u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) u32 reg = readl(dev->mmio + address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) dbg_reg("reg #%02d &= 0x%08x (old = 0x%08x, mask = 0x%08x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) address, ~mask, reg, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) writel(reg & ~mask, dev->mmio + address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) /* Wait for status bits while checking for errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static int r592_wait_status(struct r592_device *dev, u32 mask, u32 wanted_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) unsigned long timeout = jiffies + msecs_to_jiffies(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) u32 reg = r592_read_reg(dev, R592_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) if ((reg & mask) == wanted_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) while (time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) reg = r592_read_reg(dev, R592_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if ((reg & mask) == wanted_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) if (reg & (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) return -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /* Enable/disable device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) static int r592_enable_device(struct r592_device *dev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) dbg("%sabling the device", enable ? "en" : "dis");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /* Power up the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) r592_write_reg(dev, R592_POWER, R592_POWER_0 | R592_POWER_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /* Perform a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /* Power down the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) r592_write_reg(dev, R592_POWER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /* Set serial/parallel mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static int r592_set_mode(struct r592_device *dev, bool parallel_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if (!parallel_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) dbg("switching to serial mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /* Set serial mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) dbg("switching to parallel mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /* This setting should be set _before_ switch TPC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) r592_set_reg_mask(dev, R592_POWER, R592_POWER_20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) r592_clear_reg_mask(dev, R592_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) R592_IO_SERIAL1 | R592_IO_SERIAL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) /* Set the parallel mode now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) dev->parallel_mode = parallel_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
/* Perform a controller reset without powering down the card */
static void r592_host_reset(struct r592_device *dev)
{
	r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
	/* give the controller time to settle after the reset */
	msleep(100);
	/* re-apply the current bus mode (presumably lost across the
	 * reset — confirm against the hardware docs) */
	r592_set_mode(dev, dev->parallel_mode);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
#ifdef CONFIG_PM_SLEEP
/* Disable all hardware interrupts (only used on the PM sleep path,
 * hence the CONFIG_PM_SLEEP guard) */
static void r592_clear_interrupts(struct r592_device *dev)
{
	/* Disable & ACK all interrupts */
	r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_ACK_MASK);
	r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_EN_MASK);
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /* Tests if there is an CRC error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) static int r592_test_io_error(struct r592_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) if (!(r592_read_reg(dev, R592_STATUS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) (R592_STATUS_SEND_ERR | R592_STATUS_RECV_ERR)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /* Ensure that FIFO is ready for use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) static int r592_test_fifo_empty(struct r592_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) dbg("FIFO not ready, trying to reset the device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) r592_host_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) message("FIFO still not ready, giving up");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /* Activates the DMA transfer from to FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) static void r592_start_dma(struct r592_device *dev, bool is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) spin_lock_irqsave(&dev->irq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /* Ack interrupts (just in case) + enable them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /* Set DMA address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) /* Enable the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) reg = r592_read_reg(dev, R592_FIFO_DMA_SETTINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) reg |= R592_FIFO_DMA_SETTINGS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) if (!is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) reg |= R592_FIFO_DMA_SETTINGS_DIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) reg &= ~R592_FIFO_DMA_SETTINGS_DIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) r592_write_reg(dev, R592_FIFO_DMA_SETTINGS, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) spin_unlock_irqrestore(&dev->irq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
/* Tear down DMA: disable the engine, park its address on a dummy page,
 * mask its interrupts, and record @error in dev->dma_error */
static void r592_stop_dma(struct r592_device *dev, int error)
{
	/* Switch the DMA engine off first */
	r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS,
		R592_FIFO_DMA_SETTINGS_EN);

	/* This is only a precaution: if the engine still writes, it
	 * hits a harmless dummy page instead of freed memory */
	r592_write_reg(dev, R592_FIFO_DMA,
		dev->dummy_dma_page_physical_address);

	r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
	r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
	dev->dma_error = error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /* Test if hardware supports DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static void r592_check_dma(struct r592_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) dev->dma_capable = r592_enable_dma &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) R592_FIFO_DMA_SETTINGS_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) /* Transfers fifo contents in/out using DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static int r592_transfer_fifo_dma(struct r592_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) int len, sg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) bool is_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) if (!dev->dma_capable || !dev->req->long_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) len = dev->req->sg.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) is_write = dev->req->data_dir == WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) if (len != R592_LFIFO_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) dbg_verbose("doing dma transfer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) dev->dma_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) reinit_completion(&dev->dma_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) /* TODO: hidden assumption about nenth beeing always 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) message("problem in dma_map_sg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) r592_start_dma(dev, is_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) /* Wait for DMA completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) if (!wait_for_completion_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) &dev->dma_done, msecs_to_jiffies(1000))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) message("DMA timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) r592_stop_dma(dev, -ETIMEDOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return dev->dma_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
/*
 * Writes the FIFO in 4 byte chunks.
 * If length isn't 4 byte aligned, the remainder is kept in a small
 * spill kfifo (dev->pio_fifo) to be combined with the next call.
 * Use r592_flush_fifo_write to flush that spill when writing for the
 * last time.
 *
 * NOTE(review): this relies on dev->pio_fifo holding exactly one dword
 * (4 bytes), so "full" means a complete dword — confirm at the kfifo's
 * allocation site.
 */
static void r592_write_fifo_pio(struct r592_device *dev,
		unsigned char *buffer, int len)
{
	/* flush spill from former write */
	if (!kfifo_is_empty(&dev->pio_fifo)) {

		u8 tmp[4] = {0};
		/* top the spill up from the start of the new buffer */
		int copy_len = kfifo_in(&dev->pio_fifo, buffer, len);

		/* still less than one dword in total - keep buffering */
		if (!kfifo_is_full(&dev->pio_fifo))
			return;
		len -= copy_len;
		buffer += copy_len;

		/* write out the completed dword */
		copy_len = kfifo_out(&dev->pio_fifo, tmp, 4);
		WARN_ON(copy_len != 4);
		r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp);
	}

	WARN_ON(!kfifo_is_empty(&dev->pio_fifo));

	/* write full dwords */
	while (len >= 4) {
		r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
		buffer += 4;
		len -= 4;
	}

	/* put remaining bytes to the spill */
	if (len)
		kfifo_in(&dev->pio_fifo, buffer, len);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /* Flushes the temporary FIFO used to make aligned DWORD writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static void r592_flush_fifo_write(struct r592_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) u8 buffer[4] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (kfifo_is_empty(&dev->pio_fifo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) len = kfifo_out(&dev->pio_fifo, buffer, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
/*
 * Read a fifo in 4 bytes chunks.
 * If input doesn't fit the buffer, it places bytes of last dword in spill
 * buffer, so that they don't get lost on last read, just throw these away.
 */
static void r592_read_fifo_pio(struct r592_device *dev,
		unsigned char *buffer, int len)
{
	u8 tmp[4];

	/* Read leftover bytes spilled by the previous call first */
	if (!kfifo_is_empty(&dev->pio_fifo)) {
		int bytes_copied =
			kfifo_out(&dev->pio_fifo, buffer, min(4, len));
		buffer += bytes_copied;
		len -= bytes_copied;

		/* spill still not drained: the request was fully
		 * satisfied from it, nothing to read from hardware */
		if (!kfifo_is_empty(&dev->pio_fifo))
			return;
	}

	/* Reads dwords from FIFO */
	while (len >= 4) {
		*(u32 *)buffer = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
		buffer += 4;
		len -= 4;
	}

	/* partial tail: read a whole dword, hand 'len' bytes to the
	 * caller and keep the remainder in the spill kfifo */
	if (len) {
		*(u32 *)tmp = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
		kfifo_in(&dev->pio_fifo, tmp, 4);
		len -= kfifo_out(&dev->pio_fifo, buffer, len);
	}

	WARN_ON(len);
	return;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
/* Transfers actual data using PIO; always returns 0. */
static int r592_transfer_fifo_pio(struct r592_device *dev)
{
	unsigned long flags;

	/* TPC codes from MS_TPC_SET_RW_REG_ADRS upward are writes */
	bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
	struct sg_mapping_iter miter;

	/* drop any spill left over from a previous transfer */
	kfifo_reset(&dev->pio_fifo);

	/* short requests carry their payload inline (req->data),
	 * not in a scatterlist */
	if (!dev->req->long_data) {
		if (is_write) {
			r592_write_fifo_pio(dev, dev->req->data,
				dev->req->data_len);
			r592_flush_fifo_write(dev);
		} else
			r592_read_fifo_pio(dev, dev->req->data,
				dev->req->data_len);
		return 0;
	}

	/* NOTE(review): irqs are disabled around the iteration,
	 * presumably as required by SG_MITER_ATOMIC — confirm */
	local_irq_save(flags);
	sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
		(is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG));

	/* Do the transfer fifo<->memory*/
	while (sg_miter_next(&miter))
		if (is_write)
			r592_write_fifo_pio(dev, miter.addr, miter.length);
		else
			r592_read_fifo_pio(dev, miter.addr, miter.length);


	/* Write last few non aligned bytes*/
	if (is_write)
		r592_flush_fifo_write(dev);

	sg_miter_stop(&miter);
	local_irq_restore(flags);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) /* Executes one TPC (data is read/written from small or large fifo) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) static void r592_execute_tpc(struct r592_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) bool is_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) int len, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) u32 status, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (!dev->req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) message("BUG: tpc execution without request!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) len = dev->req->long_data ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) dev->req->sg.length : dev->req->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) /* Ensure that FIFO can hold the input data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (len > R592_LFIFO_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) message("IO: hardware doesn't support TPCs longer that 512");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) error = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) dbg("IO: refusing to send TPC because card is absent");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) dbg("IO: executing %s LEN=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) memstick_debug_get_tpc_name(dev->req->tpc), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) /* Set IO direction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) if (is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) error = r592_test_fifo_empty(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) /* Transfer write data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) if (is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) error = r592_transfer_fifo_dma(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (error == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) error = r592_transfer_fifo_pio(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /* Trigger the TPC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) reg = (len << R592_TPC_EXEC_LEN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) (dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) R592_TPC_EXEC_BIG_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) r592_write_reg(dev, R592_TPC_EXEC, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) /* Wait for TPC completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) status = R592_STATUS_RDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (dev->req->need_card_int)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) status |= R592_STATUS_CED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) error = r592_wait_status(dev, status, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) message("card didn't respond");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) /* Test IO errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) error = r592_test_io_error(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) dbg("IO error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) /* Read data from FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) if (!is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) error = r592_transfer_fifo_dma(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) if (error == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) error = r592_transfer_fifo_pio(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /* read INT reg. This can be shortened with shifts, but that way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) its more readable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (dev->parallel_mode && dev->req->need_card_int) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) dev->req->int_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) status = r592_read_reg(dev, R592_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) if (status & R592_STATUS_P_CMDNACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) dev->req->int_reg |= MEMSTICK_INT_CMDNAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (status & R592_STATUS_P_BREQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) dev->req->int_reg |= MEMSTICK_INT_BREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) if (status & R592_STATUS_P_INTERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) dev->req->int_reg |= MEMSTICK_INT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (status & R592_STATUS_P_CED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) dev->req->int_reg |= MEMSTICK_INT_CED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) dbg("FIFO read error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) dev->req->error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /* Main request processing thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) static int r592_process_thread(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct r592_device *dev = (struct r592_device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) spin_lock_irqsave(&dev->io_thread_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) error = memstick_next_req(dev->host, &dev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) spin_unlock_irqrestore(&dev->io_thread_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (error == -ENXIO || error == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) dbg_verbose("IO: done IO, sleeping");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) dbg("IO: unknown error from "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) "memstick_next_req %d", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) r592_execute_tpc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) /* Reprogram chip to detect change in card state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /* eg, if card is detected, arm it to detect removal, and vice versa */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) static void r592_update_card_detect(struct r592_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) u32 reg = r592_read_reg(dev, R592_REG_MSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) bool card_detected = reg & R592_REG_MSC_PRSNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) dbg("update card detect. card state: %s", card_detected ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) "present" : "absent");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) reg &= ~((R592_REG_MSC_IRQ_REMOVE | R592_REG_MSC_IRQ_INSERT) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (card_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) reg |= (R592_REG_MSC_IRQ_REMOVE << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) reg |= (R592_REG_MSC_IRQ_INSERT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) r592_write_reg(dev, R592_REG_MSC, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* Timer routine that fires 1 second after last card detection event, */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) static void r592_detect_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) struct r592_device *dev = from_timer(dev, t, detect_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) r592_update_card_detect(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) memstick_detect_change(dev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) /* Interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) static irqreturn_t r592_irq(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) struct r592_device *dev = (struct r592_device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) u16 irq_enable, irq_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) spin_lock_irqsave(&dev->irq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) reg = r592_read_reg(dev, R592_REG_MSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) irq_enable = reg >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) irq_status = reg & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) /* Ack the interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) reg &= ~irq_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) r592_write_reg(dev, R592_REG_MSC, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /* Get the IRQ status minus bits that aren't enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) irq_status &= (irq_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /* Due to limitation of memstick core, we don't look at bits that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) indicate that card was removed/inserted and/or present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (irq_status & (R592_REG_MSC_IRQ_INSERT | R592_REG_MSC_IRQ_REMOVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) bool card_was_added = irq_status & R592_REG_MSC_IRQ_INSERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) message("IRQ: card %s", card_was_added ? "added" : "removed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) mod_timer(&dev->detect_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) jiffies + msecs_to_jiffies(card_was_added ? 500 : 50));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (irq_status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) (R592_REG_MSC_FIFO_DMA_DONE | R592_REG_MSC_FIFO_DMA_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (irq_status & R592_REG_MSC_FIFO_DMA_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) message("IRQ: DMA error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) dbg_verbose("IRQ: dma done");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) r592_stop_dma(dev, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) complete(&dev->dma_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) spin_unlock_irqrestore(&dev->irq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) /* External inteface: set settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) static int r592_set_param(struct memstick_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) enum memstick_param param, int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct r592_device *dev = memstick_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) switch (param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) case MEMSTICK_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) switch (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) case MEMSTICK_POWER_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) return r592_enable_device(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) case MEMSTICK_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) return r592_enable_device(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) case MEMSTICK_INTERFACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) switch (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) case MEMSTICK_SERIAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return r592_set_mode(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) case MEMSTICK_PAR4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return r592_set_mode(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /* External interface: submit requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) static void r592_submit_req(struct memstick_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) struct r592_device *dev = memstick_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (dev->req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) spin_lock_irqsave(&dev->io_thread_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (wake_up_process(dev->io_thread))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) dbg_verbose("IO thread woken to process requests");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) spin_unlock_irqrestore(&dev->io_thread_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) static const struct pci_device_id r592_pci_id_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) { PCI_VDEVICE(RICOH, 0x0592), },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /* Main entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) int error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct memstick_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) struct r592_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /* Allocate memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (!host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) dev = memstick_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) dev->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) dev->pci_dev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* pci initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) error = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) goto error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) goto error3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) error = pci_request_regions(pdev, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) goto error3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) dev->mmio = pci_ioremap_bar(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!dev->mmio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) goto error4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) dev->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) spin_lock_init(&dev->irq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) spin_lock_init(&dev->io_thread_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) init_completion(&dev->dma_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) INIT_KFIFO(dev->pio_fifo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) timer_setup(&dev->detect_timer, r592_detect_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* Host initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) host->caps = MEMSTICK_CAP_PAR4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) host->request = r592_submit_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) host->set_param = r592_set_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) r592_check_dma(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (IS_ERR(dev->io_thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) error = PTR_ERR(dev->io_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) goto error5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* This is just a precation, so don't fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) &dev->dummy_dma_page_physical_address, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) r592_stop_dma(dev , 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) error = request_irq(dev->irq, &r592_irq, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) DRV_NAME, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) goto error6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) r592_update_card_detect(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) error = memstick_add_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) goto error7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) message("driver successfully loaded");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) error7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) error6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (dev->dummy_dma_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) dev->dummy_dma_page_physical_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) kthread_stop(dev->io_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) error5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) iounmap(dev->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) error4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) error3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) error2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) memstick_free_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) error1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void r592_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct r592_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* Stop the processing thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) That ensures that we won't take any more requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) kthread_stop(dev->io_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) r592_enable_device(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) while (!error && dev->req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) dev->req->error = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) error = memstick_next_req(dev->host, &dev->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) memstick_remove_host(dev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (dev->dummy_dma_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) dev->dummy_dma_page_physical_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) iounmap(dev->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) memstick_free_host(dev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static int r592_suspend(struct device *core_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct r592_device *dev = dev_get_drvdata(core_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) r592_clear_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) memstick_suspend_host(dev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) del_timer_sync(&dev->detect_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) static int r592_resume(struct device *core_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct r592_device *dev = dev_get_drvdata(core_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) r592_clear_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) r592_enable_device(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) memstick_resume_host(dev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) r592_update_card_detect(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static SIMPLE_DEV_PM_OPS(r592_pm_ops, r592_suspend, r592_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) MODULE_DEVICE_TABLE(pci, r592_pci_id_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static struct pci_driver r852_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) .id_table = r592_pci_id_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) .probe = r592_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) .remove = r592_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) .driver.pm = &r592_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) module_pci_driver(r852_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) module_param(debug, int, S_IRUGO | S_IWUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) MODULE_PARM_DESC(debug, "Debug level (0-3)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) MODULE_DESCRIPTION("Ricoh R5C592 Memstick/Memstick PRO card reader driver");