Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

drivers/firewire/ohci.c (all lines from commit 8f3ce5b39, kx, 2023-10-28)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)

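/*
 * Fields of the first quadlet (the control word) of an OHCI 1394 DMA
 * descriptor, per the OHCI 1394 specification: cmd (bits 15-12), status
 * control (bit 11), key (bits 10-8), and the interrupt (bits 5-4),
 * branch (bits 3-2) and wait (bits 1-0) controls.
 */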
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

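/*
 * Each DMA context has a pair of ContextControl registers: a write to the
 * register at offset 0 sets bits, a write to the one at offset 4 clears
 * them.  CommandPtr and ContextMatch follow at offsets 12 and 16.
 */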
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

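/*
 * A received packet consists of up to 16 bytes of header, at most the
 * maximum asynchronous payload, and the trailing status quadlet that the
 * controller appends; such a packet can span AR_WRAPAROUND_PAGES pages.
 */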
#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

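/*
 * These macros build the two header quadlets kept in an IT context's
 * OUTPUT_MORE-Immediate descriptor: speed, tag, channel, tcode and sy go
 * into the first quadlet, data_length into the second.
 */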
#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
	unsigned long flushing_completions;
	u32 mc_buffer_bus;
	u16 mc_completed;
	u16 last_timestamp;
	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	char __iomem *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;     /* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels; /* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;     /* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels; /* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32    *config_rom;
	dma_addr_t config_rom_bus;
	__be32    *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32     next_header;

	__le32    *self_id;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};

static struct workqueue_struct *selfid_workqueue;

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

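/*
 * Mode bits in the isochronous contexts' ContextControl registers: cycle
 * match enable for IT; buffer-fill, isochronous-header, cycle-match,
 * multi-channel and dual-buffer modes for IR.
 */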
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

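/* Run/wake/dead/active bits common to all DMA contexts' ContextControl. */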
#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

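/* The OHCI 1394 register map and the self-ID receive buffer are 2 kB each. */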
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_REV_ID_VIA_VT6306		0x46
#define PCI_DEVICE_ID_VIA_VT6315	0x3403

#define QUIRK_CYCLE_TIMER		0x1
#define QUIRK_RESET_PACKET		0x2
#define QUIRK_BE_HEADERS		0x4
#define QUIRK_NO_1394A			0x8
#define QUIRK_NO_MSI			0x10
#define QUIRK_TI_SLLZ059		0x20
#define QUIRK_IR_WAKE			0x40

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");

static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}

static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
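/*
 * Two-bit self-ID port status: '.' not present, '-' not connected,
 * 'p' connected to parent, 'c' connected to child.
 */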
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}

static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
	u32 *s;

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
		    self_id_count, generation, ohci->node_id);

	for (s = ohci->self_id_buffer; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			ohci_notice(ohci,
			    "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			ohci_notice(ohci,
			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}

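/*
 * evts[] names the 5-bit event codes that the controller writes into a
 * descriptor's xferStatus field; tcodes[] names the IEEE 1394 transaction
 * codes.
 */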
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};

static void log_ar_at_event(struct fw_ohci *ohci,
			    char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
			    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xa:
		ohci_notice(ohci, "A%c %s, %s\n",
			    dir, evts[evt], tcodes[tcode]);
		break;
	case 0xe:
		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
			    dir, evts[evt], header[1], header[2]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		ohci_notice(ohci,
			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
			    dir, speed, header[0] >> 10 & 0x3f,
			    header[1] >> 16, header[0] >> 16, evts[evt],
			    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		ohci_notice(ohci,
			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
			    dir, speed, header[0] >> 10 & 0x3f,
			    header[1] >> 16, header[0] >> 16, evts[evt],
			    tcodes[tcode], specific);
	}
}

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

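/*
 * The bus address of each AR buffer page is stashed in the page's private
 * field (set_page_private()) when the page is allocated.
 */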
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address  &= cpu_to_le32(~0xf);
	d->res_count       =  cpu_to_le16(PAGE_SIZE);
	d->transfer_status =  0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address  |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	unsigned int i;

	vunmap(ctx->buffer);

	for (i = 0; i < AR_BUFFERS; i++) {
		if (ctx->pages[i])
			dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
				       ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
	}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

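/*
 * The AR buffers form a ring.  last_buffer_index is the buffer most
 * recently linked to the tail of the DMA program, so the one after it
 * holds the oldest data, i.e. where the CPU resumes consuming packets.
 */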
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) static inline unsigned int ar_next_buffer_index(unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	return (index + 1) % AR_BUFFERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	return ar_next_buffer_index(ctx->last_buffer_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713)  * We search for the buffer that contains the last AR packet DMA data written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714)  * by the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 						 unsigned int *buffer_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	unsigned int i, next_i, last = ctx->last_buffer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	__le16 res_count, next_res_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	i = ar_first_buffer_index(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	res_count = READ_ONCE(ctx->descriptors[i].res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	/* A buffer that is not yet completely filled must be the last one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	while (i != last && res_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		/* Peek at the next descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		next_i = ar_next_buffer_index(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		rmb(); /* read descriptors in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		 * If the next descriptor is still empty, we must stop at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		 * descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			 * The exception is when the DMA data for one packet is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			 * split over three buffers; in this case, the middle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			 * buffer's descriptor might be never updated by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			 * controller and look still empty, and we have to peek
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			 * at the third one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 				next_i = ar_next_buffer_index(next_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 				rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 				if (next_res_count != cpu_to_le16(PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 					goto next_buffer_is_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) next_buffer_is_active:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		i = next_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		res_count = next_res_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	rmb(); /* read res_count before the DMA data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
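^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	 * res_count counts the bytes not (yet) written by the controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	 * so PAGE_SIZE - res_count is the amount of valid data.  A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	 * res_count above PAGE_SIZE wraps the subtraction, which the check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	 * below treats as a corrupted descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	 */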
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (*buffer_offset > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		*buffer_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		ar_context_abort(ctx, "corrupted descriptor");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 				    unsigned int end_buffer_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 				    unsigned int end_buffer_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	i = ar_first_buffer_index(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	while (i != end_buffer_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		dma_sync_single_for_cpu(ctx->ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 					ar_buffer_bus(ctx, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 					PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		i = ar_next_buffer_index(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if (end_buffer_offset > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		dma_sync_single_for_cpu(ctx->ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 					ar_buffer_bus(ctx, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 					end_buffer_offset, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
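^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  * With QUIRK_BE_HEADERS (seemingly needed for Apple UniNorth on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  * big-endian PPC32), received header quadlets already arrive in CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  * byte order and must not be swapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  */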
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) #define cond_le32_to_cpu(v) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) #define cond_le32_to_cpu(v) le32_to_cpu(v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	struct fw_ohci *ohci = ctx->ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	struct fw_packet p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	u32 status, length, tcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	int evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	p.header[0] = cond_le32_to_cpu(buffer[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	p.header[1] = cond_le32_to_cpu(buffer[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	p.header[2] = cond_le32_to_cpu(buffer[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	tcode = (p.header[0] >> 4) & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	switch (tcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	case TCODE_WRITE_QUADLET_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	case TCODE_READ_QUADLET_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		p.header[3] = (__force __u32) buffer[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		p.header_length = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		p.payload_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	case TCODE_READ_BLOCK_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		p.header[3] = cond_le32_to_cpu(buffer[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		p.header_length = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		p.payload_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	case TCODE_WRITE_BLOCK_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	case TCODE_READ_BLOCK_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	case TCODE_LOCK_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	case TCODE_LOCK_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		p.header[3] = cond_le32_to_cpu(buffer[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		p.header_length = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		p.payload_length = p.header[3] >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			ar_context_abort(ctx, "invalid packet length");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	case TCODE_WRITE_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	case TCODE_READ_QUADLET_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	case OHCI_TCODE_PHY_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		p.header_length = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		p.payload_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		ar_context_abort(ctx, "invalid tcode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	p.payload = (void *) buffer + p.header_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	/* FIXME: What to do about evt_* errors? */
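^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	 * The controller appends one status quadlet after the packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	 * round header + payload up to whole quadlets to locate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	 * An evt of 0x10 plus an IEEE 1394 ack code means the ack was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 * received, hence p.ack = evt - 16 below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	 */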
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	length = (p.header_length + p.payload_length + 3) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	status = cond_le32_to_cpu(buffer[length]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	evt    = (status >> 16) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	p.ack        = evt - 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	p.speed      = (status >> 21) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	p.timestamp  = status & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	p.generation = ohci->request_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 * Several controllers, notably from NEC and VIA, forget to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * write ack_complete status at PHY packet reception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (evt == OHCI1394_evt_no_status &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		p.ack = ACK_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	 * The OHCI bus reset handler synthesizes a PHY packet with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	 * the new generation number when a bus reset happens (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	 * section 8.4.2.3).  This helps us determine when a request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	 * was received and make sure we send the response in the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	 * generation.  We only need this for requests; for responses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	 * we use the unique tlabel for finding the matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	 * request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	 * Alas some chips sometimes emit bus reset packets with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	 * wrong generation.  We set the correct generation for these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 * at a slightly incorrect time (in bus_reset_work).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (evt == OHCI1394_evt_bus_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		if (!(ohci->quirks & QUIRK_RESET_PACKET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			ohci->request_generation = (p.header[2] >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	} else if (ctx == &ohci->ar_request_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		fw_core_handle_request(&ohci->card, &p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		fw_core_handle_response(&ohci->card, &p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	return buffer + length + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	void *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	while (p < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		next = handle_ar_packet(ctx, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		if (!next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		p = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
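^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * Hand fully processed buffers back to the controller: give the pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * back to the device for DMA and relink their descriptors into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  * ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  */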
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	i = ar_first_buffer_index(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	while (i != end_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		dma_sync_single_for_device(ctx->ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 					   ar_buffer_bus(ctx, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 					   PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		ar_context_link_page(ctx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		i = ar_next_buffer_index(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) static void ar_context_tasklet(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	struct ar_context *ctx = (struct ar_context *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	unsigned int end_buffer_index, end_buffer_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	void *p, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	p = ctx->pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	end_buffer_index = ar_search_last_active_buffer(ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 							&end_buffer_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (end_buffer_index < ar_first_buffer_index(ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		 * The filled part of the overall buffer wraps around; handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		 * all packets up to the buffer end here.  If the last packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 * wraps around, its tail will be visible after the buffer end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		 * because the buffer start pages are mapped there again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		p = handle_ar_packets(ctx, p, buffer_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		if (p < buffer_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		/* adjust p to point back into the actual buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		p -= AR_BUFFERS * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	p = handle_ar_packets(ctx, p, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (p != end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		if (p > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			ar_context_abort(ctx, "inconsistent descriptor");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	ctx->pointer = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	ar_recycle_buffers(ctx, end_buffer_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	ctx->pointer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			   unsigned int descriptors_offset, u32 regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	struct device *dev = ohci->card.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	struct descriptor *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	ctx->regs        = regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	ctx->ohci        = ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	for (i = 0; i < AR_BUFFERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 						DMA_FROM_DEVICE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		if (!ctx->pages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			goto out_of_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		set_page_private(ctx->pages[i], dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 					   DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
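^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 * Map the first AR_WRAPAROUND_PAGES pages a second time, right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	 * behind the end of the ring, so a packet that wraps around the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * ring's end can be read contiguously without copying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 */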
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	for (i = 0; i < AR_BUFFERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		pages[i]              = ctx->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		pages[AR_BUFFERS + i] = ctx->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (!ctx->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		goto out_of_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	for (i = 0; i < AR_BUFFERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		d = &ctx->descriptors[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		d->req_count      = cpu_to_le16(PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 						DESCRIPTOR_STATUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 						DESCRIPTOR_BRANCH_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			ar_next_buffer_index(i) * sizeof(struct descriptor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) out_of_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	ar_context_release(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static void ar_context_run(struct ar_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	for (i = 0; i < AR_BUFFERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		ar_context_link_page(ctx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	ctx->pointer = ctx->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
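^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 * The low four bits of CommandPtr hold the Z value, the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	 * descriptors in the first block; each AR block is a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	 * INPUT_MORE descriptor, hence the | 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 */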
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	__le16 branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	/* figure out which descriptor the branch address goes in */
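^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 * In a two-descriptor block that always branches, the second slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * typically holds immediate data rather than a real descriptor, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 * the branch address lives in the first descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 */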
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		return d + z - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static void context_tasklet(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	struct context *ctx = (struct context *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	struct descriptor *d, *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	u32 address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	int z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	struct descriptor_buffer *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	desc = list_entry(ctx->buffer_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			struct descriptor_buffer, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	last = ctx->last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	while (last->branch_address != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		struct descriptor_buffer *old_desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		address = le32_to_cpu(last->branch_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		z = address & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		address &= ~0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		ctx->current_bus = address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		/* If the branch address points to a buffer outside of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		 * current buffer, advance to the next buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		if (address < desc->buffer_bus ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				address >= desc->buffer_bus + desc->used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			desc = list_entry(desc->list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 					struct descriptor_buffer, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		last = find_branch_descriptor(d, z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (!ctx->callback(ctx, d, last))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		if (old_desc != desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			/* If we've advanced to the next buffer, move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			 * previous buffer to the free list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			old_desc->used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			spin_lock_irqsave(&ctx->ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			list_move_tail(&old_desc->list, &ctx->buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		ctx->last = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * Allocate a new buffer and add it to the list of free buffers for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  * context.  Must be called with ohci->lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static int context_add_buffer(struct context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct descriptor_buffer *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	dma_addr_t bus_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	 * 16MB of descriptors should be far more than enough for any DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	 * program.  This will catch run-away userspace or DoS attacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (ctx->total_allocation >= 16*1024*1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			&bus_addr, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	offset = (void *)&desc->buffer - (void *)desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	 * for descriptors, even 0x10-byte ones. This can cause page faults when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	 * an IOMMU is in use and the oversized read crosses a page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	 * Work around this by always leaving at least 0x10 bytes of padding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	desc->buffer_size = PAGE_SIZE - offset - 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	desc->buffer_bus = bus_addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	desc->used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	list_add_tail(&desc->list, &ctx->buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	ctx->total_allocation += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static int context_init(struct context *ctx, struct fw_ohci *ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			u32 regs, descriptor_callback_t callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	ctx->ohci = ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	ctx->regs = regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	ctx->total_allocation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	INIT_LIST_HEAD(&ctx->buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (context_add_buffer(ctx) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			struct descriptor_buffer, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	ctx->callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	 * We put a dummy descriptor in the buffer that has a NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	 * branch address and looks like it's been sent.  That way we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	 * have a descriptor to append DMA programs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	ctx->last = ctx->buffer_tail->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	ctx->prev = ctx->buffer_tail->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	ctx->prev_z = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static void context_release(struct context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	struct fw_card *card = &ctx->ohci->card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	struct descriptor_buffer *desc, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		dma_free_coherent(card->device, PAGE_SIZE, desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			desc->buffer_bus -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			((void *)&desc->buffer - (void *)desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* Must be called with ohci->lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static struct descriptor *context_get_descriptors(struct context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 						  int z, dma_addr_t *d_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	struct descriptor *d = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	struct descriptor_buffer *desc = ctx->buffer_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (z * sizeof(*d) > desc->buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		/* No room for the descriptor in this buffer, so advance to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		 * next one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		if (desc->list.next == &ctx->buffer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			/* If there is no free buffer next in the list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			 * allocate one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			if (context_add_buffer(ctx) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		desc = list_entry(desc->list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 				struct descriptor_buffer, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		ctx->buffer_tail = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	d = desc->buffer + desc->used / sizeof(*d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	memset(d, 0, z * sizeof(*d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	*d_bus = desc->buffer_bus + desc->used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void context_run(struct context *ctx, u32 extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	struct fw_ohci *ohci = ctx->ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	reg_write(ohci, COMMAND_PTR(ctx->regs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		  le32_to_cpu(ctx->last->branch_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	ctx->running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static void context_append(struct context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			   struct descriptor *d, int z, int extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	dma_addr_t d_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	struct descriptor_buffer *desc = ctx->buffer_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	struct descriptor *d_branch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	desc->used += (z + extra) * sizeof(*d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	wmb(); /* finish init of new descriptors before branch_address update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	d_branch->branch_address = cpu_to_le32(d_bus | z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	 * VT6306 incorrectly checks only the single descriptor at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 * CommandPtr when the wake bit is written, so if it's a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * the branch address in the first descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 * We do not do this for transmit contexts, since it is not clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 * how it interacts with skip addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	    d_branch != ctx->prev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	ctx->prev = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	ctx->prev_z = z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void context_stop(struct context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	struct fw_ohci *ohci = ctx->ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	ctx->running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	for (i = 0; i < 1000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		if ((reg & CONTEXT_ACTIVE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
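^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)  * Per-packet bookkeeping, stored in the otherwise unused fourth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  * descriptor slot of an AT block; payloads of up to 8 bytes are copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  * into inline_data instead of being DMA-mapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  */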
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct driver_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	u8 inline_data[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	struct fw_packet *packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  * This function appends a packet to the DMA queue for transmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  * Must always be called with the ohci->lock held to ensure proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  * generation handling and locking around packet queue manipulation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static int at_context_queue_packet(struct context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 				   struct fw_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	struct fw_ohci *ohci = ctx->ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	dma_addr_t d_bus, payload_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	struct driver_data *driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	struct descriptor *d, *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	__le32 *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	int z, tcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	d = context_get_descriptors(ctx, 4, &d_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	if (d == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		packet->ack = RCODE_SEND_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	d[0].res_count = cpu_to_le16(packet->timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	 * The DMA format for asynchronous link packets is different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 * from the IEEE1394 layout, so shift the fields around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	 * accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	tcode = (packet->header[0] >> 4) & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	header = (__le32 *) &d[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	switch (tcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	case TCODE_WRITE_QUADLET_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	case TCODE_WRITE_BLOCK_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	case TCODE_WRITE_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	case TCODE_READ_QUADLET_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	case TCODE_READ_BLOCK_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	case TCODE_READ_QUADLET_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	case TCODE_READ_BLOCK_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	case TCODE_LOCK_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	case TCODE_LOCK_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 					(packet->speed << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 					(packet->header[0] & 0xffff0000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		header[2] = cpu_to_le32(packet->header[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		if (TCODE_IS_BLOCK_PACKET(tcode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			header[3] = cpu_to_le32(packet->header[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			header[3] = (__force __le32) packet->header[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		d[0].req_count = cpu_to_le16(packet->header_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	case TCODE_LINK_INTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 					(packet->speed << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		header[1] = cpu_to_le32(packet->header[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		header[2] = cpu_to_le32(packet->header[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		d[0].req_count = cpu_to_le16(12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		if (is_ping_packet(&packet->header[1]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	case TCODE_STREAM_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 					(packet->speed << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		d[0].req_count = cpu_to_le16(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		/* BUG(); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		packet->ack = RCODE_SEND_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	driver_data = (struct driver_data *) &d[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	driver_data->packet = packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	packet->driver_data = driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (packet->payload_length > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		if (packet->payload_length > sizeof(driver_data->inline_data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			payload_bus = dma_map_single(ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 						     packet->payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 						     packet->payload_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 						     DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			if (dma_mapping_error(ohci->card.device, payload_bus)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 				packet->ack = RCODE_SEND_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			packet->payload_bus	= payload_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			packet->payload_mapped	= true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		} else {
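^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			 * Small payloads go into driver_data's inline_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			 * which lives in descriptor slot d[3], so the payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			 * address is d_bus plus three descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			 */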
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			memcpy(driver_data->inline_data, packet->payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			       packet->payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			payload_bus = d_bus + 3 * sizeof(*d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		d[2].req_count    = cpu_to_le16(packet->payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		d[2].data_address = cpu_to_le32(payload_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		last = &d[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		z = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		last = &d[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		z = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 				     DESCRIPTOR_IRQ_ALWAYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 				     DESCRIPTOR_BRANCH_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	/* FIXME: Document how the locking works. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	if (ohci->generation != packet->generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		if (packet->payload_mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			dma_unmap_single(ohci->card.device, payload_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 					 packet->payload_length, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		packet->ack = RCODE_GENERATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
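^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	 * Reserve all four descriptor slots (z real ones plus 4 - z extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	 * so that driver_data in d[3] is not overwritten by the next block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	 */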
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	context_append(ctx, d, z, 4 - z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (ctx->running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		context_run(ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
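^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  * Run the completion handler with ctx->flushing set so that descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  * whose status was never written (e.g. packets pending at a bus reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  * are completed with an error instead of being skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  */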
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static void at_context_flush(struct context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	tasklet_disable(&ctx->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	ctx->flushing = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	context_tasklet((unsigned long)ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	ctx->flushing = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	tasklet_enable(&ctx->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static int handle_at_packet(struct context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			    struct descriptor *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 			    struct descriptor *last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct driver_data *driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	struct fw_packet *packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	struct fw_ohci *ohci = context->ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	int evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (last->transfer_status == 0 && !context->flushing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		/* This descriptor isn't done yet, stop iteration. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	driver_data = (struct driver_data *) &d[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	packet = driver_data->packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (packet == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		/* This packet was cancelled, just continue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	if (packet->payload_mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		dma_unmap_single(ohci->card.device, packet->payload_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				 packet->payload_length, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	evt = le16_to_cpu(last->transfer_status) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	packet->timestamp = le16_to_cpu(last->res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	case OHCI1394_evt_timeout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		/* Async response transmit timed out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		packet->ack = RCODE_CANCELLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	case OHCI1394_evt_flushed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		 * The packet was flushed; this should give the same error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		 * as when we try to use a stale generation count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		packet->ack = RCODE_GENERATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	case OHCI1394_evt_missing_ack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		if (context->flushing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			packet->ack = RCODE_GENERATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			 * Using a valid (current) generation count, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			 * node is not on the bus or not sending acks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			packet->ack = RCODE_NO_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
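^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	 * evt values of 0x10 plus an IEEE 1394 ack code mean the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	 * was transmitted and that ack was received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	 */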
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	case ACK_COMPLETE + 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	case ACK_PENDING + 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	case ACK_BUSY_X + 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	case ACK_BUSY_A + 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	case ACK_BUSY_B + 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	case ACK_DATA_ERROR + 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	case ACK_TYPE_ERROR + 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		packet->ack = evt - 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	case OHCI1394_evt_no_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		if (context->flushing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			packet->ack = RCODE_GENERATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		packet->ack = RCODE_SEND_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	packet->callback(packet, &ohci->card, packet->ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) #define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) #define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) #define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) #define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) #define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static void handle_local_rom(struct fw_ohci *ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			     struct fw_packet *packet, u32 csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	struct fw_packet response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	int tcode, length, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	tcode = HEADER_GET_TCODE(packet->header[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	if (TCODE_IS_BLOCK_PACKET(tcode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		length = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	i = csr - CSR_CONFIG_ROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	if (i + length > CONFIG_ROM_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		fw_fill_response(&response, packet->header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 				 RCODE_ADDRESS_ERROR, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		fw_fill_response(&response, packet->header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 				 RCODE_TYPE_ERROR, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 				 (void *) ohci->config_rom + i, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	fw_core_handle_response(&ohci->card, &response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static void handle_local_lock(struct fw_ohci *ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			      struct fw_packet *packet, u32 csr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	struct fw_packet response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	int tcode, length, ext_tcode, sel, try;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	__be32 *payload, lock_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	u32 lock_arg, lock_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	tcode = HEADER_GET_TCODE(packet->header[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	payload = packet->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	if (tcode == TCODE_LOCK_REQUEST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		lock_arg = be32_to_cpu(payload[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		lock_data = be32_to_cpu(payload[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		lock_arg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		lock_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		fw_fill_response(&response, packet->header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 				 RCODE_TYPE_ERROR, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	reg_write(ohci, OHCI1394_CSRData, lock_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	reg_write(ohci, OHCI1394_CSRControl, sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
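	/*
	 * The controller sets bit 31 of OHCI1394_CSRControl once the
	 * compare-and-swap has completed; poll for it a bounded number of
	 * times rather than spinning forever.
	 */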
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	for (try = 0; try < 20; try++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 			lock_old = cpu_to_be32(reg_read(ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 							OHCI1394_CSRData));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			fw_fill_response(&response, packet->header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 					 RCODE_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 					 &lock_old, sizeof(lock_old));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	fw_core_handle_response(&ohci->card, &response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) static void handle_local_request(struct context *ctx, struct fw_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	u64 offset, csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (ctx == &ctx->ohci->at_request_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		packet->ack = ACK_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		packet->callback(packet, &ctx->ohci->card, packet->ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		((unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		packet->header[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	csr = offset - CSR_REGISTER_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	/* Handle config rom reads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		handle_local_rom(ctx->ohci, packet, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	else switch (csr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	case CSR_BUS_MANAGER_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	case CSR_BANDWIDTH_AVAILABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	case CSR_CHANNELS_AVAILABLE_HI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	case CSR_CHANNELS_AVAILABLE_LO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		handle_local_lock(ctx->ohci, packet, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		if (ctx == &ctx->ohci->at_request_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			fw_core_handle_request(&ctx->ohci->card, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 			fw_core_handle_response(&ctx->ohci->card, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	if (ctx == &ctx->ohci->at_response_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		packet->ack = ACK_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		packet->callback(packet, &ctx->ohci->card, packet->ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	spin_lock_irqsave(&ctx->ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	    ctx->ohci->generation == packet->generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		handle_local_request(ctx, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	ret = at_context_queue_packet(ctx, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		packet->callback(packet, &ctx->ohci->card, packet->ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static void detect_dead_context(struct fw_ohci *ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 				const char *name, unsigned int regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	u32 ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	ctl = reg_read(ohci, CONTROL_SET(regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	if (ctl & CONTEXT_DEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			name, evts[ctl & 0x1f]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static void handle_dead_contexts(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	char name[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	for (i = 0; i < 32; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		if (!(ohci->it_context_support & (1 << i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		sprintf(name, "IT%u", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	for (i = 0; i < 32; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		if (!(ohci->ir_context_support & (1 << i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		sprintf(name, "IR%u", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	/* TODO: maybe try to flush and restart the dead contexts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
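/*
 * The cycle timer register packs three fields: cycleOffset in bits 0..11
 * (one tick of the 24.576 MHz clock, 0..3071), cycleCount in bits 12..24
 * (isochronous cycles of 125 us, 0..7999) and cycleSeconds in bits 25..31.
 * cycle_timer_ticks() flattens these into one monotonic tick count:
 * 3072 ticks per cycle, 3072 * 8000 ticks per second.
 */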
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static u32 cycle_timer_ticks(u32 cycle_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	u32 ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	ticks = cycle_timer & 0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	ticks += (3072 * 8000) * (cycle_timer >> 25);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	return ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  * Some controllers exhibit one or more of the following bugs when updating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  * iso cycle timer register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  *  - When the lowest six bits are wrapping around to zero, a read that happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  *    at the same time will return garbage in the lowest ten bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  *  - When the cycleOffset field wraps around to zero, the cycleCount field is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)  *    not incremented for about 60 ns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  *  - Occasionally, the entire register reads zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  * To catch these, we read the register three times and ensure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)  * differences between consecutive reads are approximately equal, i.e. neither
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)  * is more than twice the other.  Furthermore, any negative difference indicates an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)  * execute, so we have enough precision to compute the ratio of the differences.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)  */
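/*
 * Worked example of the check below (illustrative numbers): reads of
 * t0 = 1000, t1 = 1030, t2 = 1060 ticks give diff01 = diff12 = 30; both
 * integer ratios are 1, so the result is accepted.  A glitched middle
 * read such as t1 = 0 makes diff01 negative and forces a retry, and the
 * retry counter bails out after 20 attempts so a genuinely broken timer
 * cannot hang us.
 */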
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) static u32 get_cycle_time(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	u32 c0, c1, c2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	u32 t0, t1, t2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	s32 diff01, diff12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		c1 = c2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			c0 = c1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			c1 = c2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			t0 = cycle_timer_ticks(c0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			t1 = cycle_timer_ticks(c1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 			t2 = cycle_timer_ticks(c2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 			diff01 = t1 - t0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 			diff12 = t2 - t1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		} while ((diff01 <= 0 || diff12 <= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			 && i++ < 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	return c2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  * This function has to be called at least every 64 seconds.  The bus_time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)  * field stores not only the upper 25 bits of the BUS_TIME register but also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)  * the most significant bit of the cycle timer in bit 6 so that we can detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)  * changes in this bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)  */
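/*
 * Illustration: while cycleSeconds counts 0x00..0x3f, bit 6 of bus_time
 * matches bit 6 of the timer and nothing happens.  The moment the timer
 * reaches 0x40 (or wraps from 0x7f to 0x00), the two bits disagree, the
 * comparison below fires exactly once, and bus_time advances by 0x40,
 * i.e. 64 seconds.
 */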
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static u32 update_bus_time(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	if (unlikely(!ohci->bus_time_running)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		                 (cycle_time_seconds & 0x40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		ohci->bus_time_running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		ohci->bus_time += 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	return ohci->bus_time | cycle_time_seconds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) static int get_status_for_port(struct fw_ohci *ohci, int port_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	mutex_lock(&ohci->phy_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	reg = write_phy_reg(ohci, 7, port_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if (reg >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		reg = read_phy_reg(ohci, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	mutex_unlock(&ohci->phy_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	if (reg < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	switch (reg & 0x0f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	case 0x06:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		return 2;	/* is child node (connected to parent node) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	case 0x0e:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		return 3;	/* is parent node (connected to child node) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	return 1;		/* not connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
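/*
 * Return the index at which a constructed self ID must be inserted to
 * keep self_id_buffer[] sorted by phy ID (the top byte of each quadlet),
 * or -1 if an entry with the same phy ID is already present.
 */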
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	int self_id_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	u32 entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	for (i = 0; i < self_id_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		entry = ohci->self_id_buffer[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		if ((self_id & 0xff000000) == (entry & 0xff000000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		if ((self_id & 0xff000000) < (entry & 0xff000000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static int initiated_reset(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	mutex_lock(&ohci->phy_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	if (reg >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		reg = read_phy_reg(ohci, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		reg |= 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		if (reg >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 			reg = read_phy_reg(ohci, 12); /* read register 12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			if (reg >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 				if ((reg & 0x08) == 0x08) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 					/* bit 3 indicates "initiated reset" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 					ret = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	mutex_unlock(&ohci->phy_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)  * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)  * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  * Construct the selfID from phy register contents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	int reg, i, pos, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	u32 self_id = 0x8040c800;
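	/*
	 * Decoded: bits 31..30 = 0b10 mark a self-ID packet; the constant
	 * presets link-active (bit 22), speed code 3 (bits 15..14) and the
	 * contender bit (bit 11).  Phy ID (bits 29..24), gap count (21..16),
	 * power class (10..8), the three port-status fields (7..2) and the
	 * initiated-reset flag (bit 1) are ORed in below.
	 */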
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	reg = reg_read(ohci, OHCI1394_NodeID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	if (!(reg & OHCI1394_NodeID_idValid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		ohci_notice(ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 			    "node ID not valid, new bus reset in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	self_id |= ((reg & 0x3f) << 24); /* phy ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	reg = ohci_read_phy_reg(&ohci->card, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if (reg < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	self_id |= ((reg & 0x07) << 8); /* power class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	reg = ohci_read_phy_reg(&ohci->card, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	if (reg < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	self_id |= ((reg & 0x3f) << 16); /* gap count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		status = get_status_for_port(ohci, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		self_id |= ((status & 0x3) << (6 - (i * 2)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	self_id |= initiated_reset(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	pos = get_self_id_pos(ohci, self_id, self_id_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	if (pos >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		memmove(&(ohci->self_id_buffer[pos+1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			&(ohci->self_id_buffer[pos]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		ohci->self_id_buffer[pos] = self_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		self_id_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	return self_id_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static void bus_reset_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	struct fw_ohci *ohci =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		container_of(work, struct fw_ohci, bus_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	int self_id_count, generation, new_generation, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	void *free_rom = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	dma_addr_t free_rom_bus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	bool is_new_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	reg = reg_read(ohci, OHCI1394_NodeID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (!(reg & OHCI1394_NodeID_idValid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		ohci_notice(ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 			    "node ID not valid, new bus reset in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		ohci_notice(ohci, "misconfigured bus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			       OHCI1394_NodeID_nodeNumber);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	if (!(ohci->is_root && is_new_root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		reg_write(ohci, OHCI1394_LinkControlSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			  OHCI1394_LinkControl_cycleMaster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	ohci->is_root = is_new_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	reg = reg_read(ohci, OHCI1394_SelfIDCount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	if (reg & OHCI1394_SelfIDCount_selfIDError) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		ohci_notice(ohci, "self ID receive error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	 * The count in the SelfIDCount register is the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	 * bytes in the self ID receive buffer.  Since we also receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	 * the inverted quadlets and a header quadlet, we shift one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	 * bit extra to get the actual number of self IDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	 */
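	/*
	 * For example, five self IDs arrive as one header quadlet plus five
	 * (quadlet, inverted-quadlet) pairs: 11 quadlets = 44 bytes = 0x2c,
	 * and (0x2c >> 3) & 0xff recovers 5.
	 */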
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	self_id_count = (reg >> 3) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	if (self_id_count > 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		if (id != ~id2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 			 * If the invalid data looks like a cycle start packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			 * it's likely to be the result of the cycle master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			 * having a wrong gap count.  In this case, the self IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 			 * so far are valid and should be processed so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			 * bus manager can then correct the gap count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			if (id == 0xffff008f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 				ohci_notice(ohci, "ignoring spurious self IDs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 				self_id_count = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 				    j, self_id_count, id, id2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 		ohci->self_id_buffer[j] = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (ohci->quirks & QUIRK_TI_SLLZ059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		self_id_count = find_and_insert_self_id(ohci, self_id_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		if (self_id_count < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 			ohci_notice(ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 				    "could not construct local self ID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	if (self_id_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		ohci_notice(ohci, "no self IDs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	 * Check the consistency of the self IDs we just read.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	 * problem we face is that a new bus reset can start while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	 * read out the self IDs from the DMA buffer. If this happens,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	 * the DMA buffer will be overwritten with new self IDs and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	 * will read out inconsistent data.  The OHCI specification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	 * (section 11.2) recommends a technique similar to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	 * linux/seqlock.h, where we remember the generation of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	 * self IDs in the buffer before reading them out and compare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	 * it to the current generation after reading them out.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	 * the two generations match we know we have a consistent set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	 * of self IDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	if (new_generation != generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		ohci_notice(ohci, "new bus reset, discarding self IDs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	/* FIXME: Document how the locking works. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	spin_lock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	ohci->generation = -1; /* prevent AT packet queueing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	context_stop(&ohci->at_request_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	context_stop(&ohci->at_response_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	spin_unlock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	 * packets in the AT queues and software needs to drain them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	at_context_flush(&ohci->at_request_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	at_context_flush(&ohci->at_response_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	spin_lock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	ohci->generation = generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if (ohci->quirks & QUIRK_RESET_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		ohci->request_generation = generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	 * This next bit is unrelated to the AT context stuff but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	 * have to do it under the spinlock also.  If a new config rom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	 * was set up before this reset, the old one is now no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	 * in use and we can free it. Update the config rom pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	 * to point to the current config rom and clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	 * next_config_rom pointer so a new update can take place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	if (ohci->next_config_rom != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		if (ohci->next_config_rom != ohci->config_rom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 			free_rom      = ohci->config_rom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 			free_rom_bus  = ohci->config_rom_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		ohci->config_rom      = ohci->next_config_rom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		ohci->config_rom_bus  = ohci->next_config_rom_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		ohci->next_config_rom = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		 * Restore config_rom image and manually update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		 * config_rom registers.  Writing the header quadlet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		 * will indicate that the config rom is ready, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		 * do that last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		reg_write(ohci, OHCI1394_BusOptions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 			  be32_to_cpu(ohci->config_rom[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		ohci->config_rom[0] = ohci->next_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		reg_write(ohci, OHCI1394_ConfigROMhdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 			  be32_to_cpu(ohci->next_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	if (param_remote_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	spin_unlock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	if (free_rom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 				  free_rom, free_rom_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	log_selfids(ohci, generation, self_id_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 				 self_id_count, ohci->self_id_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 				 ohci->csr_state_setclear_abdicate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	ohci->csr_state_setclear_abdicate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static irqreturn_t irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	struct fw_ohci *ohci = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	u32 event, iso_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	event = reg_read(ohci, OHCI1394_IntEventClear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
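	/*
	 * An all-zero event word means the interrupt was not ours; all ones
	 * typically means the card was ejected and the read returned ~0.
	 */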
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	if (!event || !~event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	 * busReset and postedWriteErr must not be cleared yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	reg_write(ohci, OHCI1394_IntEventClear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	log_irqs(ohci, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	if (event & OHCI1394_selfIDComplete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		queue_work(selfid_workqueue, &ohci->bus_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	if (event & OHCI1394_RQPkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		tasklet_schedule(&ohci->ar_request_ctx.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	if (event & OHCI1394_RSPkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		tasklet_schedule(&ohci->ar_response_ctx.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	if (event & OHCI1394_reqTxComplete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		tasklet_schedule(&ohci->at_request_ctx.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	if (event & OHCI1394_respTxComplete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		tasklet_schedule(&ohci->at_response_ctx.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	if (event & OHCI1394_isochRx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		while (iso_event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 			i = ffs(iso_event) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 			tasklet_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 				&ohci->ir_context_list[i].context.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 			iso_event &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	if (event & OHCI1394_isochTx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		while (iso_event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			i = ffs(iso_event) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			tasklet_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 				&ohci->it_context_list[i].context.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			iso_event &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (unlikely(event & OHCI1394_regAccessFail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		ohci_err(ohci, "register access failure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	if (unlikely(event & OHCI1394_postedWriteErr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		reg_write(ohci, OHCI1394_IntEventClear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 			  OHCI1394_postedWriteErr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			ohci_err(ohci, "PCI posted write error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (unlikely(event & OHCI1394_cycleTooLong)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 			ohci_notice(ohci, "isochronous cycle too long\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		reg_write(ohci, OHCI1394_LinkControlSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			  OHCI1394_LinkControl_cycleMaster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (unlikely(event & OHCI1394_cycleInconsistent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		 * We need to clear this event bit in order to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		 * cycleMatch isochronous I/O work.  In theory we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		 * stop active cycleMatch iso contexts now and restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		 * them at least two cycles later.  (FIXME?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 			ohci_notice(ohci, "isochronous cycle inconsistent\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	if (unlikely(event & OHCI1394_unrecoverableError))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		handle_dead_contexts(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	if (event & OHCI1394_cycle64Seconds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		spin_lock(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		update_bus_time(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		spin_unlock(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) static int software_reset(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	for (i = 0; i < 500; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		val = reg_read(ohci, OHCI1394_HCControlSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		if (!~val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			return -ENODEV; /* Card was ejected. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		if (!(val & OHCI1394_HCControl_softReset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
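/*
 * length is in quadlets; zero-fill the remainder of the CONFIG_ROM_SIZE
 * buffer so that no stale data from a previous ROM image survives.
 */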
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	size_t size = length * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	memcpy(dest, src, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	if (size < CONFIG_ROM_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) static int configure_1394a_enhancements(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	bool enable_1394a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	int ret, clear, set, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	/* Check if the driver should configure link and PHY. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	      OHCI1394_HCControl_programPhyEnable))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	/* Paranoia: check whether the PHY supports 1394a, too. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	enable_1394a = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	ret = read_phy_reg(ohci, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		ret = read_paged_phy_reg(ohci, 1, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		if (ret >= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			enable_1394a = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	if (ohci->quirks & QUIRK_NO_1394A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		enable_1394a = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	/* Configure PHY and link consistently. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	if (enable_1394a) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		clear = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	ret = update_phy_reg(ohci, 5, clear, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	if (enable_1394a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		offset = OHCI1394_HCControlSet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		offset = OHCI1394_HCControlClear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	/* Clean up: configuration has been taken care of. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	reg_write(ohci, OHCI1394_HCControlClear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		  OHCI1394_HCControl_programPhyEnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) static int probe_tsb41ba3d(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	/* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	int reg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	reg = read_phy_reg(ohci, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	if (reg < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		reg = read_paged_phy_reg(ohci, 1, i + 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		if (reg < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		if (reg != id[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) static int ohci_enable(struct fw_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		       const __be32 *config_rom, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	u32 lps, version, irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	ret = software_reset(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		ohci_err(ohci, "failed to reset ohci card\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	 * Now enable LPS, which we need in order to start accessing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	 * most of the registers.  In fact, on some cards (ALI M5251),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	 * accessing registers in the SClk domain without LPS enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	 * will lock up the machine.  Wait 50 msec to make sure the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	 * is fully enabled.  However, with some cards (well, at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	 * a JMicron PCIe card), we have to try again sometimes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	 * cannot actually use the PHY at that time.  They also need tens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	 * of milliseconds' pause between the LPS write and first PHY access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	reg_write(ohci, OHCI1394_HCControlSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		  OHCI1394_HCControl_LPS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		  OHCI1394_HCControl_postedWriteEnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	for (lps = 0, i = 0; !lps && i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		msleep(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		lps = reg_read(ohci, OHCI1394_HCControlSet) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		      OHCI1394_HCControl_LPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	if (!lps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		ohci_err(ohci, "failed to set Link Power Status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	if (ohci->quirks & QUIRK_TI_SLLZ059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		ret = probe_tsb41ba3d(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 			ohci_notice(ohci, "local TSB41BA3D phy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			ohci->quirks &= ~QUIRK_TI_SLLZ059;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	reg_write(ohci, OHCI1394_HCControlClear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		  OHCI1394_HCControl_noByteSwapData);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	reg_write(ohci, OHCI1394_LinkControlSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		  OHCI1394_LinkControl_cycleTimerEnable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		  OHCI1394_LinkControl_cycleMaster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	reg_write(ohci, OHCI1394_ATRetries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 		  OHCI1394_MAX_AT_REQ_RETRIES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		  (200 << 16));
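	/*
	 * Per the OHCI register layout, ATRetries packs maxATReqRetries
	 * into bits 0-3, maxATRespRetries into bits 4-7, maxPhysRespRetries
	 * into bits 8-11, and the retry cycle limit into bits 16 and up;
	 * hence the shifts above, with a cycle limit of 200.
	 */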
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	ohci->bus_time_running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	for (i = 0; i < 32; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		if (ohci->ir_context_support & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 			reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 				  IR_CONTEXT_MULTI_CHANNEL_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	if (version >= OHCI_VERSION_1_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 			  0xfffffffe);
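		/*
		 * Bit 31 of InitialChannelsAvailableHi corresponds to
		 * channel 0, so 0xfffffffe leaves only channel 31, the
		 * broadcast channel, marked as already allocated:
		 */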
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		card->broadcast_channel_auto_allocated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	/* Get implemented bits of the priority arbitration request counter. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	reg_write(ohci, OHCI1394_FairnessControl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	card->priority_budget_implemented = ohci->pri_req_max != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	reg_write(ohci, OHCI1394_IntEventClear, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	ret = configure_1394a_enhancements(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	/* Activate link_on bit and contender bit in our self ID packets. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	 * When the link is not yet enabled, the atomic config rom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	 * update mechanism described below in ohci_set_config_rom()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	 * is not active.  We have to update ConfigRomHeader and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	 * BusOptions manually, and the write to ConfigROMmap takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	 * effect immediately.  We tie this to the enabling of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	 * link, so we have a valid config rom before enabling - the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	 * OHCI requires that ConfigROMhdr and BusOptions have valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	 * values before enabling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	 * However, when the ConfigROMmap is written, some controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	 * always read back quadlets 0 and 2 from the config rom to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	 * the ConfigRomHeader and BusOptions registers on bus reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	 * They shouldn't do that in this initial case where the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	 * isn't enabled.  This means we have to use the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	 * workaround here, setting the bus header to 0 and then writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	 * the right values in the bus reset tasklet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	if (config_rom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		ohci->next_config_rom =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 					   &ohci->next_config_rom_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 					   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 		if (ohci->next_config_rom == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		copy_config_rom(ohci->next_config_rom, config_rom, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		 * In the suspend case, config_rom is NULL, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		 * means that we just reuse the old config rom.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		ohci->next_config_rom = ohci->config_rom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		ohci->next_config_rom_bus = ohci->config_rom_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	ohci->next_header = ohci->next_config_rom[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	ohci->next_config_rom[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	reg_write(ohci, OHCI1394_BusOptions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		  be32_to_cpu(ohci->next_config_rom[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
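	/*
	 * ohci->next_header preserves the real first quadlet; the bus reset
	 * work item writes it back to ConfigROMhdr once the reset has
	 * happened, completing the workaround described above.
	 */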
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	irqs =	OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		OHCI1394_RQPkt | OHCI1394_RSPkt |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		OHCI1394_isochTx | OHCI1394_isochRx |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		OHCI1394_postedWriteErr |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		OHCI1394_selfIDComplete |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		OHCI1394_regAccessFail |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		OHCI1394_cycleInconsistent |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		OHCI1394_unrecoverableError |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		OHCI1394_cycleTooLong |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 		OHCI1394_masterIntEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		irqs |= OHCI1394_busReset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	reg_write(ohci, OHCI1394_IntMaskSet, irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	reg_write(ohci, OHCI1394_HCControlSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		  OHCI1394_HCControl_linkEnable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		  OHCI1394_HCControl_BIBimageValid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	reg_write(ohci, OHCI1394_LinkControlSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		  OHCI1394_LinkControl_rcvSelfID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		  OHCI1394_LinkControl_rcvPhyPkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	ar_context_run(&ohci->ar_request_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	ar_context_run(&ohci->ar_response_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	/* We are ready to go, reset bus to finish initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	fw_schedule_bus_reset(&ohci->card, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) static int ohci_set_config_rom(struct fw_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			       const __be32 *config_rom, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	struct fw_ohci *ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	__be32 *next_config_rom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	dma_addr_t next_config_rom_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	 * When the OHCI controller is enabled, the config rom update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	 * mechanism is a bit tricky, but easy enough to use.  See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	 * section 5.5.6 in the OHCI specification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	 * The OHCI controller caches the new config rom address in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	 * shadow register (ConfigROMmapNext) and needs a bus reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	 * for the changes to take place.  When the bus reset is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	 * detected, the controller loads the new values for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	 * ConfigRomHeader and BusOptions registers from the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	 * shadow register. All automatically and atomically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	 * Now, there's a twist to this story.  The automatic load of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	 * ConfigRomHeader and BusOptions doesn't honor the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	 * noByteSwapData bit, so with a be32 config rom, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	 * controller will load be32 values into these registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	 * during the atomic update, even on little endian
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	 * architectures.  The workaround we use is to put a 0 in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	 * header quadlet; 0 is endian agnostic and means that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	 * config rom isn't ready yet.  In the bus reset tasklet we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	 * then set up the real values for the two registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	 * We use ohci->lock to avoid racing with the code that sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	 * ohci->next_config_rom to NULL (see bus_reset_work).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	next_config_rom =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 				   &next_config_rom_bus, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	if (next_config_rom == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	spin_lock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	 * If no config_rom update is already pending, push our new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	 * allocation into ohci->next_config_rom and then set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	 * local variable to NULL so that we won't deallocate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	 * new buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	 * OTOH, if there is a pending config_rom update, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	 * use that buffer with the new config_rom data, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	 * let this routine free the unused DMA allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	if (ohci->next_config_rom == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		ohci->next_config_rom = next_config_rom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 		ohci->next_config_rom_bus = next_config_rom_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 		next_config_rom = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	copy_config_rom(ohci->next_config_rom, config_rom, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	ohci->next_header = config_rom[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	ohci->next_config_rom[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	spin_unlock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	/* If we didn't use the DMA allocation, delete it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	if (next_config_rom != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 				  next_config_rom, next_config_rom_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	 * Now initiate a bus reset to have the changes take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	 * effect. We clean up the old config rom memory and DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	 * effect.  We clean up the old config rom memory and DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	 * mappings in the bus reset tasklet, since the OHCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	 * controller may still need to access the old ROM until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	 * bus reset takes effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	fw_schedule_bus_reset(&ohci->card, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
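
/*
 * To summarize the mechanism described above: (1) this routine writes the
 * new ROM's bus address to ConfigROMmap, where it lands only in the
 * ConfigROMmapNext shadow; (2) it schedules a bus reset; (3) on that reset
 * the controller atomically loads ConfigRomHeader, BusOptions and
 * ConfigROMmap from the new ROM; (4) bus_reset_work then patches the real
 * header quadlet back in and frees the old ROM buffer.
 */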
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	at_context_transmit(&ohci->at_request_ctx, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	at_context_transmit(&ohci->at_response_ctx, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	struct context *ctx = &ohci->at_request_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	struct driver_data *driver_data = packet->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	int ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	tasklet_disable(&ctx->tasklet);
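	/*
	 * tasklet_disable() also waits for a currently running tasklet to
	 * finish, so the AT completion handler cannot run concurrently with
	 * the cancellation below.
	 */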
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	if (packet->ack != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	if (packet->payload_mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		dma_unmap_single(ohci->card.device, packet->payload_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 				 packet->payload_length, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	driver_data->packet = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	packet->ack = RCODE_CANCELLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	packet->callback(packet, &ohci->card, packet->ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	tasklet_enable(&ctx->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) static int ohci_enable_phys_dma(struct fw_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 				int node_id, int generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	int n, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	if (param_remote_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	spin_lock_irqsave(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	if (ohci->generation != generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		ret = -ESTALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	 * Note, if the node ID contains a non-local bus ID, physical DMA is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	 * enabled for _all_ nodes on remote buses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
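	/*
	 * node_id carries a 10-bit bus ID above a 6-bit node number; for a
	 * local-bus node the matching filter bit is chosen, otherwise bit
	 * 63, which covers remote-bus requests as noted above.
	 */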
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	if (n < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	spin_unlock_irqrestore(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	switch (csr_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	case CSR_STATE_CLEAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	case CSR_STATE_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		if (ohci->is_root &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		    (reg_read(ohci, OHCI1394_LinkControlSet) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		     OHCI1394_LinkControl_cycleMaster))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 			value = CSR_STATE_BIT_CMSTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 			value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		if (ohci->csr_state_setclear_abdicate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 			value |= CSR_STATE_BIT_ABDICATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	case CSR_NODE_IDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 		return reg_read(ohci, OHCI1394_NodeID) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	case CSR_CYCLE_TIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		return get_cycle_time(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	case CSR_BUS_TIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		 * We might be called just after the cycle timer has wrapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		 * around but just before the cycle64Seconds handler, so we had
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 		 * better check here, too, whether the bus time needs updating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		spin_lock_irqsave(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		value = update_bus_time(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		spin_unlock_irqrestore(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	case CSR_BUSY_TIMEOUT:
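		/*
		 * Map the ATRetries layout onto the CSR BUSY_TIMEOUT format:
		 * the shift moves maxATRespRetries into the CSR retry_limit
		 * field (bits 0-3) and the cycle limit into bits 12 and up,
		 * and the mask drops the OHCI-only fields.
		 */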
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		value = reg_read(ohci, OHCI1394_ATRetries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		return (value >> 4) & 0x0ffff00f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	case CSR_PRIORITY_BUDGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 			(ohci->pri_req_max << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	switch (csr_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	case CSR_STATE_CLEAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 			reg_write(ohci, OHCI1394_LinkControlClear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 				  OHCI1394_LinkControl_cycleMaster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 			flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		if (value & CSR_STATE_BIT_ABDICATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 			ohci->csr_state_setclear_abdicate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	case CSR_STATE_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 			reg_write(ohci, OHCI1394_LinkControlSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 				  OHCI1394_LinkControl_cycleMaster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 			flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		if (value & CSR_STATE_BIT_ABDICATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 			ohci->csr_state_setclear_abdicate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	case CSR_NODE_IDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		reg_write(ohci, OHCI1394_NodeID, value >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	case CSR_CYCLE_TIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		reg_write(ohci, OHCI1394_IntEventSet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 			  OHCI1394_cycleInconsistent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	case CSR_BUS_TIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		spin_lock_irqsave(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		ohci->bus_time = (update_bus_time(ohci) & 0x40) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		                 (value & ~0x7f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		spin_unlock_irqrestore(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	case CSR_BUSY_TIMEOUT:
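		/*
		 * Inverse of the mapping in ohci_read_csr(): replicate the
		 * CSR retry_limit nibble into all three OHCI retry fields
		 * and shift the CSR cycle_limit back up into the ATRetries
		 * cycle limit field.
		 */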
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		value = (value & 0xf) | ((value & 0xf) << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		reg_write(ohci, OHCI1394_ATRetries, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	case CSR_PRIORITY_BUDGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) static void flush_iso_completions(struct iso_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			      ctx->header_length, ctx->header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 			      ctx->base.callback_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	ctx->header_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	u32 *ctx_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		if (ctx->base.drop_overflow_headers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		flush_iso_completions(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	ctx_hdr = ctx->header + ctx->header_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	 * The two iso header quadlets are byteswapped to little
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	 * endian by the controller, but we want to present them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	 * as big endian for consistency with the bus endianness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	if (ctx->base.header_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	if (ctx->base.header_size > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	if (ctx->base.header_size > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	ctx->header_length += ctx->base.header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
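
/*
 * With a header_size of 8, for example, the caller thus receives per packet
 * one quadlet of iso packet header followed by one quadlet of timestamp,
 * both big-endian.
 */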
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) static int handle_ir_packet_per_buffer(struct context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 				       struct descriptor *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 				       struct descriptor *last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	struct iso_context *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		container_of(context, struct iso_context, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	struct descriptor *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	u32 buffer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	for (pd = d; pd <= last; pd++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 		if (pd->transfer_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	if (pd > last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		/* Descriptor(s) not done yet, stop iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		d++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		buffer_dma = le32_to_cpu(d->data_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		dma_sync_single_range_for_cpu(context->ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 					      buffer_dma & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 					      buffer_dma & ~PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 					      le16_to_cpu(d->req_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 					      DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	copy_iso_headers(ctx, (u32 *) (last + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		flush_iso_completions(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) /* d == last because each descriptor block is only a single descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) static int handle_ir_buffer_fill(struct context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 				 struct descriptor *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 				 struct descriptor *last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	struct iso_context *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		container_of(context, struct iso_context, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	unsigned int req_count, res_count, completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	u32 buffer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	req_count = le16_to_cpu(last->req_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	res_count = le16_to_cpu(READ_ONCE(last->res_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	completed = req_count - res_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	buffer_dma = le32_to_cpu(last->data_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	if (completed > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		ctx->mc_buffer_bus = buffer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		ctx->mc_completed = completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	}
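	/*
	 * In buffer-fill mode, res_count counts down from req_count as data
	 * arrives, so the difference is the number of bytes received so far;
	 * the latest partial fill is remembered for flush_ir_buffer_fill().
	 */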
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	if (res_count != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		/* Descriptor(s) not done yet, stop iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	dma_sync_single_range_for_cpu(context->ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 				      buffer_dma & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 				      buffer_dma & ~PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 				      completed, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		ctx->base.callback.mc(&ctx->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 				      buffer_dma + completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 				      ctx->base.callback_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		ctx->mc_completed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static void flush_ir_buffer_fill(struct iso_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 				      ctx->mc_buffer_bus & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 				      ctx->mc_buffer_bus & ~PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 				      ctx->mc_completed, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	ctx->base.callback.mc(&ctx->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 			      ctx->mc_buffer_bus + ctx->mc_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 			      ctx->base.callback_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	ctx->mc_completed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static inline void sync_it_packet_for_cpu(struct context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 					  struct descriptor *pd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	__le16 control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	u32 buffer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	/* only packets beginning with OUTPUT_MORE* have data buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	pd += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	 * data buffer is in the context program's coherent page and must not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	 * be synced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	    (context->current_bus          & PAGE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		pd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 		buffer_dma = le32_to_cpu(pd->data_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		dma_sync_single_range_for_cpu(context->ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 					      buffer_dma & PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 					      buffer_dma & ~PAGE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 					      le16_to_cpu(pd->req_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 					      DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		control = pd->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 		pd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) static int handle_it_packet(struct context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 			    struct descriptor *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 			    struct descriptor *last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	struct iso_context *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 		container_of(context, struct iso_context, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	struct descriptor *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	__be32 *ctx_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	for (pd = d; pd <= last; pd++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		if (pd->transfer_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	if (pd > last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 		/* Descriptor(s) not done yet, stop iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	sync_it_packet_for_cpu(context, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	if (ctx->header_length + 4 > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		if (ctx->base.drop_overflow_headers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		flush_iso_completions(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	ctx_hdr = ctx->header + ctx->header_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	ctx->last_timestamp = le16_to_cpu(last->res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	/* Present this value as big-endian to match the receive code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 			       le16_to_cpu(pd->res_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	ctx->header_length += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		flush_iso_completions(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	u32 hi = channels >> 32, lo = channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	ohci->mc_channels = channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
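
/*
 * The paired Clear/Set writes make this update absolute rather than
 * incremental: every bit outside channels is cleared first, then every bit
 * inside it is set, so the hardware mask ends up equal to channels
 * regardless of its previous contents.  For example, a (hypothetical) call
 *
 *	set_multichannel_mask(ohci, (1ULL << 0) | (1ULL << 5));
 *
 * would listen on channels 0 and 5 only.
 */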
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 				int type, int channel, size_t header_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	struct fw_ohci *ohci = fw_ohci(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	struct iso_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	descriptor_callback_t callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	u64 *channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	u32 *mask, regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	int index, ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	spin_lock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	case FW_ISO_CONTEXT_TRANSMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 		mask     = &ohci->it_context_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 		callback = handle_it_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		index    = ffs(*mask) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		if (index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 			*mask &= ~(1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 			regs = OHCI1394_IsoXmitContextBase(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 			ctx  = &ohci->it_context_list[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	case FW_ISO_CONTEXT_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 		channels = &ohci->ir_context_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 		mask     = &ohci->ir_context_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		callback = handle_ir_packet_per_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 		index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		if (index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 			*channels &= ~(1ULL << channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 			*mask     &= ~(1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 			regs = OHCI1394_IsoRcvContextBase(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 			ctx  = &ohci->ir_context_list[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 		mask     = &ohci->ir_context_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		callback = handle_ir_buffer_fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		if (index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 			ohci->mc_allocated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 			*mask &= ~(1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 			regs = OHCI1394_IsoRcvContextBase(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 			ctx  = &ohci->ir_context_list[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		ret = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	spin_unlock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	if (index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	memset(ctx, 0, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 	ctx->header_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	ctx->header = (void *) __get_free_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	if (ctx->header == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	ret = context_init(&ctx->context, ohci, regs, callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		goto out_with_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 		set_multichannel_mask(ohci, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 		ctx->mc_completed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	return &ctx->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)  out_with_header:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	free_page((unsigned long)ctx->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	spin_lock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	case FW_ISO_CONTEXT_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 		*channels |= 1ULL << channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 		ohci->mc_allocated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	*mask |= 1 << index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	spin_unlock_irq(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) static int ohci_start_iso(struct fw_iso_context *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 			  s32 cycle, u32 sync, u32 tags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	struct iso_context *ctx = container_of(base, struct iso_context, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	struct fw_ohci *ohci = ctx->context.ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	/* the controller cannot start without any queued packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	if (ctx->context.last->branch_address == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	switch (ctx->base.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	case FW_ISO_CONTEXT_TRANSMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		index = ctx - ohci->it_context_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 		match = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		if (cycle >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 				(cycle & 0x7fff) << 16;
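		/*
		 * As encoded above, the low 15 bits of the requested start
		 * cycle land in bits 30..16 of the match value with cycle
		 * matching enabled; a negative cycle starts the context
		 * immediately.
		 */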
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 		context_run(&ctx->context, match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	case FW_ISO_CONTEXT_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		index = ctx - ohci->ir_context_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		match = (tags << 28) | (sync << 8) | ctx->base.channel;
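		/*
		 * ContextMatch layout as built here: tag-match bits in
		 * 31..28, the expected sync value in 11..8, and the channel
		 * number in the low bits; an optional start cycle is merged
		 * in at bits 26..12 just below.
		 */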
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 		if (cycle >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 			match |= (cycle & 0x07fff) << 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 		context_run(&ctx->context, control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 		ctx->sync = sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 		ctx->tags = tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) static int ohci_stop_iso(struct fw_iso_context *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	struct fw_ohci *ohci = fw_ohci(base->card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	struct iso_context *ctx = container_of(base, struct iso_context, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	switch (ctx->base.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	case FW_ISO_CONTEXT_TRANSMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		index = ctx - ohci->it_context_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	case FW_ISO_CONTEXT_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 		index = ctx - ohci->ir_context_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	context_stop(&ctx->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	tasklet_kill(&ctx->context.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 
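/*
 * Tear down an isochronous context: stop its DMA program, release the
 * descriptor buffers, and, under ohci->lock, return the context's interrupt
 * bit and channel(s) to the allocation masks so they can be handed out again.
 */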
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) static void ohci_free_iso_context(struct fw_iso_context *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	struct fw_ohci *ohci = fw_ohci(base->card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	struct iso_context *ctx = container_of(base, struct iso_context, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	ohci_stop_iso(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	context_release(&ctx->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	free_page((unsigned long)ctx->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	spin_lock_irqsave(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	switch (base->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	case FW_ISO_CONTEXT_TRANSMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 		index = ctx - ohci->it_context_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		ohci->it_context_mask |= 1 << index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	case FW_ISO_CONTEXT_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 		index = ctx - ohci->ir_context_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 		ohci->ir_context_mask |= 1 << index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		ohci->ir_context_channels |= 1ULL << base->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		index = ctx - ohci->ir_context_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		ohci->ir_context_mask |= 1 << index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 		ohci->ir_context_channels |= ohci->mc_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		ohci->mc_channels = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 		ohci->mc_allocated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	spin_unlock_irqrestore(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	struct fw_ohci *ohci = fw_ohci(base->card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	switch (base->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		spin_lock_irqsave(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		/* Don't allow multichannel to grab other contexts' channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 			*channels = ohci->ir_context_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 			set_multichannel_mask(ohci, *channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		}
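		/*
		 * The test above flags any requested channel that is neither
		 * free (ir_context_channels) nor already owned by this
		 * multichannel context (mc_channels); on failure the set of
		 * still-free channels is reported back to the caller.
		 */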
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		spin_unlock_irqrestore(&ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) static void ohci_resume_iso_dma(struct fw_ohci *ohci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	struct iso_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	for (i = 0 ; i < ohci->n_ir ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		ctx = &ohci->ir_context_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		if (ctx->context.running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	for (i = 0 ; i < ohci->n_it ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		ctx = &ohci->it_context_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		if (ctx->context.running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) static int queue_iso_transmit(struct iso_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 			      struct fw_iso_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 			      struct fw_iso_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 			      unsigned long payload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	struct descriptor *d, *last, *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	struct fw_iso_packet *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	__le32 *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	dma_addr_t d_bus, page_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	u32 z, header_z, payload_z, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	u32 payload_index, payload_end_index, next_page_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	int page, end_page, i, length, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	p = packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	payload_index = payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	if (p->skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		z = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 		z = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	if (p->header_length > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 		z++;
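	/*
	 * z now counts the leading descriptors: a two-slot OUTPUT_MORE-
	 * immediate pair carrying the 8-byte isochronous packet header
	 * (a skip packet needs only a single slot), plus one slot when
	 * extra header data follows.  One descriptor per payload page is
	 * added next.
	 */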
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	/* Determine the index of the first page past the end of the payload. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	if (p->payload_length > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 		payload_z = end_page - (payload_index >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 		payload_z = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	z += payload_z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	/* Get header size in number of descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	if (d == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	if (!p->skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		d[0].req_count = cpu_to_le16(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 		 * Link the skip address to this descriptor itself.  This causes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 		 * a context to skip a cycle whenever lost cycles or FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		 * overruns occur, without dropping the data.  The application
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 		 * should then decide whether this is an error condition or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 		 * FIXME:  Make the context's cycle-lost behaviour configurable?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 		d[0].branch_address = cpu_to_le32(d_bus | z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		header = (__le32 *) &d[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 					IT_HEADER_TAG(p->tag) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 					IT_HEADER_CHANNEL(ctx->base.channel) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 					IT_HEADER_SPEED(ctx->base.speed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		header[1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 							  p->payload_length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	if (p->header_length > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		d[2].req_count    = cpu_to_le16(p->header_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 		memcpy(&d[z], p->header, p->header_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	pd = d + z - payload_z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	payload_end_index = payload_index + p->payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	for (i = 0; i < payload_z; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		page               = payload_index >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 		offset             = payload_index & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		next_page_index    = (page + 1) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		length             =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 			min(next_page_index, payload_end_index) - payload_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		pd[i].req_count    = cpu_to_le16(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 		page_bus = page_private(buffer->pages[page]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		pd[i].data_address = cpu_to_le32(page_bus + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 						 page_bus, offset, length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 						 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 		payload_index += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	if (p->interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		irq = DESCRIPTOR_IRQ_ALWAYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 		irq = DESCRIPTOR_NO_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	last = z == 2 ? d : d + z - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 				     DESCRIPTOR_STATUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 				     DESCRIPTOR_BRANCH_ALWAYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 				     irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	context_append(&ctx->context, d, z, header_z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) static int queue_iso_packet_per_buffer(struct iso_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 				       struct fw_iso_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 				       struct fw_iso_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 				       unsigned long payload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	struct device *device = ctx->context.ohci->card.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	struct descriptor *d, *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	dma_addr_t d_bus, page_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	u32 z, header_z, rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	int i, j, length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	int page, offset, packet_count, header_size, payload_per_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	 * The OHCI controller puts the isochronous header and trailer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	 * buffer, so we need at least 8 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	packet_count = packet->header_length / ctx->base.header_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	header_size  = max(ctx->base.header_size, (size_t)8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	/* Get header size in number of descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	page     = payload >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 	offset   = payload & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	payload_per_buffer = packet->payload_length / packet_count;
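	/*
	 * The caller's packet describes packet_count equal slices.  Each
	 * loop iteration below builds one INPUT_MORE chain whose first
	 * descriptor captures header_size bytes of header/trailer into the
	 * spare descriptor slots appended to the block, and whose remaining
	 * descriptors fill one slice of the payload buffer.
	 */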
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	for (i = 0; i < packet_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		/* d points to the header descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 		d = context_get_descriptors(&ctx->context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 				z + header_z, &d_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 		if (d == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 					      DESCRIPTOR_INPUT_MORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		if (packet->skip && i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		d->req_count    = cpu_to_le16(header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		d->res_count    = d->req_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 		d->transfer_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		rest = payload_per_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		pd = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		for (j = 1; j < z; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 			pd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 						  DESCRIPTOR_INPUT_MORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 			if (offset + rest < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 				length = rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 				length = PAGE_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 			pd->req_count = cpu_to_le16(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 			pd->res_count = pd->req_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 			pd->transfer_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 			page_bus = page_private(buffer->pages[page]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 			pd->data_address = cpu_to_le32(page_bus + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 			dma_sync_single_range_for_device(device, page_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 							 offset, length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 							 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 			offset = (offset + length) & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 			rest -= length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 			if (offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 				page++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 					  DESCRIPTOR_INPUT_LAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 					  DESCRIPTOR_BRANCH_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		if (packet->interrupt && i == packet_count - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		context_append(&ctx->context, d, z, header_z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) static int queue_iso_buffer_fill(struct iso_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 				 struct fw_iso_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 				 struct fw_iso_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 				 unsigned long payload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	struct descriptor *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	dma_addr_t d_bus, page_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	int page, offset, rest, z, i, length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	page   = payload >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	offset = payload & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	rest   = packet->payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	/* We need one descriptor for each page in the buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		return -EFAULT;
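	/*
	 * The checks above enforce quadlet alignment of both the offset and
	 * the length, and that the requested payload stays within the mapped
	 * pages of the buffer.
	 */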
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	for (i = 0; i < z; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 		d = context_get_descriptors(&ctx->context, 1, &d_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 		if (d == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 					 DESCRIPTOR_BRANCH_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		if (packet->skip && i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 		if (packet->interrupt && i == z - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		if (offset + rest < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 			length = rest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 			length = PAGE_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 		d->req_count = cpu_to_le16(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		d->res_count = d->req_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 		d->transfer_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 		page_bus = page_private(buffer->pages[page]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 		d->data_address = cpu_to_le32(page_bus + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 						 page_bus, offset, length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 						 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 		rest -= length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 		offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 		page++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 		context_append(&ctx->context, d, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) static int ohci_queue_iso(struct fw_iso_context *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 			  struct fw_iso_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 			  struct fw_iso_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 			  unsigned long payload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	struct iso_context *ctx = container_of(base, struct iso_context, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	int ret = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	switch (base->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	case FW_ISO_CONTEXT_TRANSMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 		ret = queue_iso_transmit(ctx, packet, buffer, payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 	case FW_ISO_CONTEXT_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) static void ohci_flush_queue_iso(struct fw_iso_context *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	struct context *ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 			&container_of(base, struct iso_context, base)->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 
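	/*
	 * Setting the wake bit does not start a stopped context; it only
	 * prompts a running context to re-read its branch address and pick
	 * up any descriptors appended since it last stalled.
	 */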
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) static int ohci_flush_iso_completions(struct fw_iso_context *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	struct iso_context *ctx = container_of(base, struct iso_context, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	tasklet_disable(&ctx->context.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 
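	/*
	 * With the tasklet disabled, flushing_completions serialises
	 * concurrent flushes: if another caller is already flushing, this
	 * one returns without running the completion handlers again.
	 */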
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		context_tasklet((unsigned long)&ctx->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		switch (base->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		case FW_ISO_CONTEXT_TRANSMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 		case FW_ISO_CONTEXT_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 			if (ctx->header_length != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 				flush_iso_completions(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			if (ctx->mc_completed != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 				flush_ir_buffer_fill(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 			ret = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 		clear_bit_unlock(0, &ctx->flushing_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 		smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	tasklet_enable(&ctx->context.tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) static const struct fw_card_driver ohci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	.enable			= ohci_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	.read_phy_reg		= ohci_read_phy_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	.update_phy_reg		= ohci_update_phy_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	.set_config_rom		= ohci_set_config_rom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	.send_request		= ohci_send_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	.send_response		= ohci_send_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	.cancel_packet		= ohci_cancel_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	.enable_phys_dma	= ohci_enable_phys_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	.read_csr		= ohci_read_csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	.write_csr		= ohci_write_csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	.allocate_iso_context	= ohci_allocate_iso_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	.free_iso_context	= ohci_free_iso_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	.set_iso_channels	= ohci_set_iso_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	.queue_iso		= ohci_queue_iso,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	.flush_queue_iso	= ohci_flush_queue_iso,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	.flush_iso_completions	= ohci_flush_iso_completions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	.start_iso		= ohci_start_iso,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	.stop_iso		= ohci_stop_iso,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) #ifdef CONFIG_PPC_PMAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) static void pmac_ohci_on(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	if (machine_is(powermac)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 		struct device_node *ofn = pci_device_to_OF_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		if (ofn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) static void pmac_ohci_off(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	if (machine_is(powermac)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 		struct device_node *ofn = pci_device_to_OF_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 		if (ofn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) static inline void pmac_ohci_on(struct pci_dev *dev) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) static inline void pmac_ohci_off(struct pci_dev *dev) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) #endif /* CONFIG_PPC_PMAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) static int pci_probe(struct pci_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 			       const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	struct fw_ohci *ohci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	u32 bus_options, max_receive, link_speed, version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	u64 guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 		return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	if (ohci == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	pmac_ohci_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	err = pci_enable_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 		goto fail_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	pci_set_master(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	pci_set_drvdata(dev, ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	spin_lock_init(&ohci->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	mutex_init(&ohci->phy_reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 		ohci_err(ohci, "invalid MMIO resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 		err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 		goto fail_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	err = pci_request_region(dev, 0, ohci_driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 		ohci_err(ohci, "MMIO resource unavailable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		goto fail_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	if (ohci->registers == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		ohci_err(ohci, "failed to remap registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 		goto fail_iomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 		if ((ohci_quirks[i].vendor == dev->vendor) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		     ohci_quirks[i].device == dev->device) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		     ohci_quirks[i].revision >= dev->revision)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 			ohci->quirks = ohci_quirks[i].flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	if (param_quirks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 		ohci->quirks = param_quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	 * Because dma_alloc_coherent() allocates at least one page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	 * we save space by using a common buffer for the AR request/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	 * response descriptors and the self-ID buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
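	/*
	 * Layout of the shared page: AR request descriptors at offset 0, AR
	 * response descriptors at PAGE_SIZE/4, and the self-ID buffer in the
	 * second half (see the ar_context_init() calls and the self_id
	 * assignment below).
	 */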
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 					       PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 					       &ohci->misc_buffer_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 					       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	if (!ohci->misc_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 		goto fail_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 			      OHCI1394_AsReqRcvContextControlSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 		goto fail_misc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 			      OHCI1394_AsRspRcvContextControlSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 		goto fail_arreq_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	err = context_init(&ohci->at_request_ctx, ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 		goto fail_arrsp_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	err = context_init(&ohci->at_response_ctx, ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 		goto fail_atreq_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 
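	/*
	 * Probe the number of implemented IR contexts: after setting every
	 * bit in IsoRecvIntMaskSet, reading the register back leaves only
	 * the bits the hardware actually implements.  The same trick sizes
	 * the IT contexts below.
	 */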
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	ohci->ir_context_channels = ~0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 	ohci->ir_context_mask = ohci->ir_context_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	ohci->n_ir = hweight32(ohci->ir_context_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	size = sizeof(struct iso_context) * ohci->n_ir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	/* JMicron JMB38x often shows 0 at first read; just ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	if (!ohci->it_context_support) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		ohci->it_context_support = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	ohci->it_context_mask = ohci->it_context_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	ohci->n_it = hweight32(ohci->it_context_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	size = sizeof(struct iso_context) * ohci->n_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 		goto fail_contexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 	ohci->self_id     = ohci->misc_buffer     + PAGE_SIZE/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	bus_options = reg_read(ohci, OHCI1394_BusOptions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	max_receive = (bus_options >> 12) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	link_speed = bus_options & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 		reg_read(ohci, OHCI1394_GUIDLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	if (!(ohci->quirks & QUIRK_NO_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 		pci_enable_msi(dev);
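	/*
	 * MSI vectors are never shared, so IRQF_SHARED is only needed when
	 * falling back to a legacy, potentially shared interrupt line.
	 */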
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	if (request_irq(dev->irq, irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 			pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 			ohci_driver_name, ohci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 		ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 		goto fail_msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		goto fail_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	ohci_notice(ohci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 		    "added OHCI v%x.%x device as card %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 		    "%d IR + %d IT contexts, quirks 0x%x%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 		    version >> 16, version & 0xff, ohci->card.index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 		    ohci->n_ir, ohci->n_it, ohci->quirks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 		    reg_read(ohci, OHCI1394_PhyUpperBound) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 			", physUB" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715)  fail_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	free_irq(dev->irq, ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)  fail_msi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	pci_disable_msi(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)  fail_contexts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	kfree(ohci->ir_context_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	kfree(ohci->it_context_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	context_release(&ohci->at_response_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)  fail_atreq_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	context_release(&ohci->at_request_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)  fail_arrsp_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	ar_context_release(&ohci->ar_response_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)  fail_arreq_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	ar_context_release(&ohci->ar_request_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)  fail_misc_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 	dma_free_coherent(ohci->card.device, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 			  ohci->misc_buffer, ohci->misc_buffer_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732)  fail_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	pci_iounmap(dev, ohci->registers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)  fail_iomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	pci_release_region(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736)  fail_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	pci_disable_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)  fail_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	kfree(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	pmac_ohci_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)  fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) static void pci_remove(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 	struct fw_ohci *ohci = pci_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	 * If the removal is happening from the suspend state, LPS won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 	 * enabled and host registers (e.g., IntMaskClear) won't be accessible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 		reg_write(ohci, OHCI1394_IntMaskClear, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 		flush_writes(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 	cancel_work_sync(&ohci->bus_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 	fw_core_remove_card(&ohci->card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	 * FIXME: Fail all pending packets here, now that the upper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 	 * layers can't queue any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 	software_reset(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	free_irq(dev->irq, ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 				  ohci->next_config_rom, ohci->next_config_rom_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	if (ohci->config_rom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 				  ohci->config_rom, ohci->config_rom_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 	ar_context_release(&ohci->ar_request_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	ar_context_release(&ohci->ar_response_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	dma_free_coherent(ohci->card.device, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			  ohci->misc_buffer, ohci->misc_buffer_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 	context_release(&ohci->at_request_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	context_release(&ohci->at_response_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	kfree(ohci->it_context_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 	kfree(ohci->ir_context_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 	pci_disable_msi(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	pci_iounmap(dev, ohci->registers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 	pci_release_region(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 	pci_disable_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	kfree(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 	pmac_ohci_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	dev_notice(&dev->dev, "removed fw-ohci device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) static int pci_suspend(struct pci_dev *dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	struct fw_ohci *ohci = pci_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	software_reset(ohci);
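	/* Quiesce DMA before PCI state is saved and the device powered down. */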
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 	err = pci_save_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		ohci_err(ohci, "pci_save_state failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	err = pci_set_power_state(dev, pci_choose_state(dev, state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 		ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	pmac_ohci_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) }
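/*
 * Legacy PCI suspend: quiesce the controller, snapshot the config space
 * (pci_save_state() is what pci_restore_state() in pci_resume() undoes),
 * then let pci_choose_state() map the pm_message_t to a target D-state,
 * typically D3hot for system sleep, before the PowerMac cell is switched
 * off.
 */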
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) static int pci_resume(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 	struct fw_ohci *ohci = pci_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	pmac_ohci_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 	pci_set_power_state(dev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	pci_restore_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	err = pci_enable_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 		ohci_err(ohci, "pci_enable_device failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 
	/* Some systems don't set up the GUID registers on resume from RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 					!reg_read(ohci, OHCI1394_GUIDHi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 	err = ohci_enable(&ohci->card, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	ohci_resume_iso_dma(ohci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) }
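/*
 * Resume mirrors suspend: power the cell back on, return to D0 and restore
 * the config space before any MMIO, then re-enable the controller.  The
 * NULL config ROM argument tells ohci_enable() to reuse the DMA buffer
 * still allocated from before the suspend, and ohci_resume_iso_dma()
 * restarts whatever isochronous contexts were running.
 */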
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) static const struct pci_device_id pci_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) MODULE_DEVICE_TABLE(pci, pci_table);
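/*
 * The single class-based entry above binds to any controller advertising
 * the OHCI-1394 programming interface (PCI class 0x0c0010); the ~0 mask
 * makes the match compare the full class code.  A vendor-specific entry,
 * had one ever been needed, would look roughly like this sketch (the IDs
 * here are hypothetical, not taken from this driver):
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_TI, 0x8023) },
 *
 * MODULE_DEVICE_TABLE() emits the table into the module's alias list, so
 * udev/modprobe can autoload the driver from a device's modalias string,
 * roughly "pci:v*d*sv*sd*bc0Csc00i10*" for this class entry.
 */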
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) static struct pci_driver fw_ohci_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 	.name		= ohci_driver_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	.id_table	= pci_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 	.probe		= pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	.remove		= pci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 	.resume		= pci_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 	.suspend	= pci_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) };
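/*
 * This 5.10 tree keeps the legacy pci_driver .suspend/.resume hooks.  A
 * hedged sketch of the dev_pm_ops form that later kernels moved to, with
 * hypothetical wrapper names; note that with dev_pm_ops the PCI core saves
 * and restores the config space and power state itself, so those steps
 * drop out of the driver callbacks:
 *
 *	static int __maybe_unused fw_ohci_pm_suspend(struct device *dev)
 *	{
 *		struct fw_ohci *ohci = dev_get_drvdata(dev);
 *
 *		software_reset(ohci);
 *		pmac_ohci_off(to_pci_dev(dev));
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(fw_ohci_pm_ops,
 *				 fw_ohci_pm_suspend, fw_ohci_pm_resume);
 *
 * hooked up via  .driver.pm = &fw_ohci_pm_ops  in place of the two hooks.
 */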
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 
static int __init fw_ohci_init(void)
{
	int err;

	selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
	if (!selfid_workqueue)
		return -ENOMEM;

	/* don't leak the workqueue if driver registration fails */
	err = pci_register_driver(&fw_ohci_pci_driver);
	if (err)
		destroy_workqueue(selfid_workqueue);

	return err;
}
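/*
 * WQ_MEM_RECLAIM gives the self-ID workqueue a rescuer thread, so
 * bus-reset/self-ID processing can still make progress under memory
 * pressure; that matters since an SBP-2 storage device on the bus may sit
 * in the I/O path.
 */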
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) static void __exit fw_ohci_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	pci_unregister_driver(&fw_ohci_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	destroy_workqueue(selfid_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) }
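/*
 * Unregistering the driver first lets every pci_remove() call (and the
 * interrupt handlers that queue self-ID work) finish before the workqueue
 * is destroyed; destroy_workqueue() drains whatever is still queued.
 */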
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) module_init(fw_ohci_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) module_exit(fw_ohci_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) /* Provide a module alias so root-on-sbp2 initrds don't break. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) MODULE_ALIAS("ohci1394");
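/*
 * "ohci1394" was the controller driver of the old ieee1394 stack, so an
 * initrd that does "modprobe ohci1394" to reach its SBP-2 root disk ends
 * up loading this driver instead.
 */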