^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * hcd.h - DesignWare HS OTG Controller host-mode declarations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2004-2013 Synopsys, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * 1. Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * notice, this list of conditions, and the following disclaimer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * without modification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * 2. Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * notice, this list of conditions and the following disclaimer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * documentation and/or other materials provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * 3. The names of the above-listed copyright holders may not be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * to endorse or promote products derived from this software without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * ALTERNATIVELY, this software may be distributed under the terms of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * GNU General Public License ("GPL") as published by the Free Software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Foundation; either version 2 of the License, or (at your option) any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * later version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #ifndef __DWC2_HCD_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define __DWC2_HCD_H__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * This file contains the structures, constants, and interfaces for the
 * Host Controller Driver (HCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * The Host Controller Driver (HCD) is responsible for translating requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * from the USB Driver into the appropriate actions on the DWC_otg controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * It isolates the USBD from the specifics of the controller by providing an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * API to the USBD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct dwc2_qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * struct dwc2_host_chan - Software host channel descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * @hc_num: Host channel number, used for register address lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * @dev_addr: Address of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * @ep_num: Endpoint of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * @ep_is_in: Endpoint direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * @speed: Device speed. One of the following values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * - USB_SPEED_LOW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * - USB_SPEED_FULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * - USB_SPEED_HIGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * @ep_type: Endpoint type. One of the following values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * - USB_ENDPOINT_XFER_CONTROL: 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * - USB_ENDPOINT_XFER_ISOC: 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * - USB_ENDPOINT_XFER_BULK: 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * - USB_ENDPOINT_XFER_INTR: 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * @max_packet: Max packet size in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * @data_pid_start: PID for initial transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * 0: DATA0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * 1: DATA2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * 2: DATA1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * 3: MDATA (non-Control EP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * SETUP (Control EP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * @multi_count: Number of additional periodic transactions per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * (micro)frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * @xfer_buf: Pointer to current transfer buffer position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * @xfer_dma: DMA address of xfer_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * DWORD aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * @xfer_len: Total number of bytes to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * @xfer_count: Number of bytes transferred so far
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * @start_pkt_count: Packet count at start of transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * @xfer_started: True if the transfer has been started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * @do_ping: True if a PING request should be issued on this channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * @error_state: True if the error count for this transaction is non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * @halt_on_queue: True if this channel should be halted the next time a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * request is queued for the channel. This is necessary in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * slave mode if no request queue space is available when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * an attempt is made to halt the channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * @halt_pending: True if the host channel has been halted, but the core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * is not finished flushing queued requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * @do_split: Enable split for the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * @complete_split: Enable complete split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * @hub_addr: Address of high speed hub for the split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * @hub_port: Port of the low/full speed device for the split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * @xact_pos: Split transaction position. One of the following values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * - DWC2_HCSPLT_XACTPOS_MID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * - DWC2_HCSPLT_XACTPOS_BEGIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * - DWC2_HCSPLT_XACTPOS_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * - DWC2_HCSPLT_XACTPOS_ALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * @requests: Number of requests issued for this channel since it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * assigned to the current transfer (not counting PINGs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * @schinfo: Scheduling micro-frame bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * @ntd: Number of transfer descriptors for the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * @halt_status: Reason for halting the host channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * @hcint: Contents of the HCINT register when the interrupt came
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * @qh: QH for the transfer being processed by this channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * @hc_list_entry: For linking to list of host channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * @desc_list_addr: Current QH's descriptor list DMA address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * @desc_list_sz: Current QH's descriptor list size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * @split_order_list_entry: List entry for keeping track of the order of splits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * This structure represents the state of a single host channel when acting in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * host mode. It contains the data items needed to transfer packets to an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * endpoint via a host channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) */
struct dwc2_host_chan {
	u8 hc_num;

	/* Endpoint addressing/type, packed into bitfields; see kernel-doc above */
	unsigned dev_addr:7;
	unsigned ep_num:4;
	unsigned ep_is_in:1;
	unsigned speed:4;
	unsigned ep_type:2;
	unsigned max_packet:11;
	unsigned data_pid_start:2;
/* Values for @data_pid_start; mirror the TSIZ_SC_MC_PID_* register encodings */
#define DWC2_HC_PID_DATA0	TSIZ_SC_MC_PID_DATA0
#define DWC2_HC_PID_DATA2	TSIZ_SC_MC_PID_DATA2
#define DWC2_HC_PID_DATA1	TSIZ_SC_MC_PID_DATA1
#define DWC2_HC_PID_MDATA	TSIZ_SC_MC_PID_MDATA
#define DWC2_HC_PID_SETUP	TSIZ_SC_MC_PID_SETUP

	unsigned multi_count:2;

	/* State of the transfer currently assigned to this channel */
	u8 *xfer_buf;
	dma_addr_t xfer_dma;
	dma_addr_t align_buf;
	u32 xfer_len;
	u32 xfer_count;
	u16 start_pkt_count;
	u8 xfer_started;
	u8 do_ping;
	u8 error_state;
	u8 halt_on_queue;
	u8 halt_pending;
	/* Split-transaction state (FS/LS device behind a HS hub) */
	u8 do_split;
	u8 complete_split;
	u8 hub_addr;
	u8 hub_port;
	u8 xact_pos;
/* Values for @xact_pos; mirror the HCSPLT_XACTPOS_* register encodings */
#define DWC2_HCSPLT_XACTPOS_MID	HCSPLT_XACTPOS_MID
#define DWC2_HCSPLT_XACTPOS_END	HCSPLT_XACTPOS_END
#define DWC2_HCSPLT_XACTPOS_BEGIN HCSPLT_XACTPOS_BEGIN
#define DWC2_HCSPLT_XACTPOS_ALL	HCSPLT_XACTPOS_ALL

	u8 requests;
	u8 schinfo;
	u16 ntd;
	enum dwc2_halt_status halt_status;
	u32 hcint;
	struct dwc2_qh *qh;
	struct list_head hc_list_entry;
	dma_addr_t desc_list_addr;
	u32 desc_list_sz;
	struct list_head split_order_list_entry;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
/**
 * struct dwc2_hcd_pipe_info - Device/endpoint addressing for an URB
 *
 * @dev_addr:  Address of the device
 * @ep_num:    Endpoint number on the device
 * @pipe_type: Endpoint transfer type (presumably the same
 *             USB_ENDPOINT_XFER_* encoding used by struct dwc2_host_chan's
 *             @ep_type -- confirm against the code that fills this in)
 * @pipe_dir:  Endpoint direction (IN/OUT; exact encoding not visible in
 *             this header -- verify against callers)
 * @maxp:      Value from the endpoint's wMaxPacketSize field
 * @maxp_mult: Multiplier for @maxp
 */
struct dwc2_hcd_pipe_info {
	u8 dev_addr;
	u8 ep_num;
	u8 pipe_type;
	u8 pipe_dir;
	u16 maxp;
	u16 maxp_mult;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
/**
 * struct dwc2_hcd_iso_packet_desc - Per-packet state of an isochronous URB
 *
 * @offset:        Offset of this packet's data within the URB transfer
 *                 buffer (assumed; appears to mirror
 *                 struct usb_iso_packet_descriptor -- verify)
 * @length:        Number of bytes to transfer for this packet
 * @actual_length: Number of bytes actually transferred
 * @status:        Completion status of this packet
 */
struct dwc2_hcd_iso_packet_desc {
	u32 offset;
	u32 length;
	u32 actual_length;
	u32 status;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct dwc2_qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
/**
 * struct dwc2_hcd_urb - HCD-internal representation of a USB request
 *
 * @priv:          Opaque pointer for the submitter's use (presumably the
 *                 enclosing struct urb -- confirm against hcd.c)
 * @qtd:           QTD associated with this URB
 * @buf:           Transfer buffer
 * @dma:           DMA address of @buf
 * @setup_packet:  Setup packet buffer (control transfers)
 * @setup_dma:     DMA address of @setup_packet
 * @length:        Length of @buf in bytes
 * @actual_length: Number of bytes actually transferred
 * @status:        Completion status
 * @error_count:   Number of errors that have occurred on this URB
 * @packet_count:  Number of isochronous packets (entries in @iso_descs)
 * @flags:         URB flags (encoding not visible in this header -- verify)
 * @interval:      Interval for periodic transfers
 * @pipe_info:     Addressing info for the target device/endpoint
 * @iso_descs:     Flexible array of per-packet isochronous descriptors
 */
struct dwc2_hcd_urb {
	void *priv;
	struct dwc2_qtd *qtd;
	void *buf;
	dma_addr_t dma;
	void *setup_packet;
	dma_addr_t setup_dma;
	u32 length;
	u32 actual_length;
	u32 status;
	u32 error_count;
	u32 packet_count;
	u32 flags;
	u16 interval;
	struct dwc2_hcd_pipe_info pipe_info;
	struct dwc2_hcd_iso_packet_desc iso_descs[];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) /* Phases for control transfers */
enum dwc2_control_phase {
	DWC2_CONTROL_SETUP,	/* Setup stage */
	DWC2_CONTROL_DATA,	/* Data stage */
	DWC2_CONTROL_STATUS,	/* Status stage */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /* Transaction types */
enum dwc2_transaction_type {
	DWC2_TRANSACTION_NONE,		/* no transactions to process */
	DWC2_TRANSACTION_PERIODIC,	/* process the periodic schedule */
	DWC2_TRANSACTION_NON_PERIODIC,	/* process the non-periodic schedule */
	DWC2_TRANSACTION_ALL,		/* process both schedules */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
/*
 * The number of elements per LS bitmap (per port on multi_tt): one bit per
 * low-speed schedule slice, rounded up to whole 'unsigned long' words.
 */
#define DWC2_ELEMENTS_PER_LS_BITMAP	DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
						     BITS_PER_LONG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * struct dwc2_tt - dwc2 data associated with a usb_tt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * @refcount: Number of Queue Heads (QHs) holding a reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * @usb_tt: Pointer back to the official usb_tt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * @periodic_bitmaps: Bitmap for which parts of the 1ms frame are accounted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * for already. Each is DWC2_ELEMENTS_PER_LS_BITMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * elements (so sizeof(long) times that in bytes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) * This structure is stored in the hcpriv of the official usb_tt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) */
struct dwc2_tt {
	int refcount;
	struct usb_tt *usb_tt;
	/* Flexible array; sizing described in the kernel-doc above */
	unsigned long periodic_bitmaps[];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * @start_schedule_us: The start time on the main bus schedule. Note that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * the main bus schedule is tightly packed and this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * time should be interpreted as tightly packed (so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) * instead of 125 us).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * @duration_us: How long this transfer goes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
struct dwc2_hs_transfer_time {
	u32 start_schedule_us;	/* tightly-packed start time, in us */
	u16 duration_us;	/* transfer duration, in us */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * struct dwc2_qh - Software queue head structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * @ep_type: Endpoint type. One of the following values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * - USB_ENDPOINT_XFER_CONTROL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * - USB_ENDPOINT_XFER_BULK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * - USB_ENDPOINT_XFER_INT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * - USB_ENDPOINT_XFER_ISOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * @ep_is_in: Endpoint direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * @maxp: Value from wMaxPacketSize field of Endpoint Descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * @maxp_mult: Multiplier for maxp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * @dev_speed: Device speed. One of the following values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * - USB_SPEED_LOW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) * - USB_SPEED_FULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) * - USB_SPEED_HIGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) * @data_toggle: Determines the PID of the next data packet for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * non-controltransfers. Ignored for control transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * One of the following values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * - DWC2_HC_PID_DATA0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) * - DWC2_HC_PID_DATA1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) * @ping_state: Ping state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) * @do_split: Full/low speed endpoint on high-speed hub requires split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * @td_first: Index of first activated isochronous transfer descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * @td_last: Index of last activated isochronous transfer descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * @host_us: Bandwidth in microseconds per transfer as seen by host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) * @device_us: Bandwidth in microseconds per transfer as seen by device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * @host_interval: Interval between transfers as seen by the host. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * the host is high speed and the device is low speed this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * will be 8 times device interval.
 * @device_interval:    Interval between transfers as seen by the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * @next_active_frame: (Micro)frame _before_ we next need to put something on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * the bus. We'll move the qh to active here. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * host is in high speed mode this will be a uframe. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * the host is in low speed mode this will be a full frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * @start_active_frame: If we are partway through a split transfer, this will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * what next_active_frame was when we started. Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * it should always be the same as next_active_frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * @num_hs_transfers: Number of transfers in hs_transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * Normally this is 1 but can be more than one for splits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * Always >= 1 unless the host is in low/full speed mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * @hs_transfers: Transfers that are scheduled as seen by the high speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * bus. Not used if host is in low or full speed mode (but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * note that it IS USED if the device is low or full speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * as long as the HOST is in high speed mode).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * schedule that's being used by this device. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * will be on the periodic_bitmap in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * "struct dwc2_tt". Not used if this device is high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * speed. Note that this is in "schedule slice" which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * is tightly packed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * @ntd: Actual number of transfer descriptors in a list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * @dw_align_buf: Used instead of original buffer if its physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * is not dword-aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * @dw_align_buf_dma: DMA address for dw_align_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * @qtd_list: List of QTDs for this QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * @channel: Host channel currently processing transfers for this QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * @qh_list_entry: Entry for QH in either the periodic or non-periodic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * @desc_list: List of transfer descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) * @desc_list_dma: Physical address of desc_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * @desc_list_sz: Size of descriptors list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) * descriptor and indicates original XferSize value for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) * descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) * @unreserve_timer: Timer for releasing periodic reservation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * @wait_timer: Timer used to wait before re-queuing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) * @dwc_tt: Pointer to our tt info (or NULL if no tt).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) * @ttport: Port number within our tt.
 * @tt_buffer_dirty:    True if clear_tt_buffer_complete is pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * @unreserve_pending: True if we planned to unreserve but haven't yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * @schedule_low_speed: True if we have a low/full speed component (either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * host is in low/full speed mode or do_split).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * @want_wait: We should wait before re-queuing; only matters for non-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * periodic transfers and is ignored for periodic ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * @wait_timer_cancel: Set to true to cancel the wait_timer.
 *
 * A Queue Head (QH) holds the static characteristics of an endpoint and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * be entered in either the non-periodic or periodic schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) */
struct dwc2_qh {
	struct dwc2_hsotg *hsotg;
	/* Static endpoint characteristics; see kernel-doc above for details */
	u8 ep_type;
	u8 ep_is_in;
	u16 maxp;
	u16 maxp_mult;
	u8 dev_speed;
	u8 data_toggle;
	u8 ping_state;
	u8 do_split;
	u8 td_first;
	u8 td_last;
	/* Scheduling state (bandwidth, intervals, frame bookkeeping) */
	u16 host_us;
	u16 device_us;
	u16 host_interval;
	u16 device_interval;
	u16 next_active_frame;
	u16 start_active_frame;
	s16 num_hs_transfers;
	struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
	u32 ls_start_schedule_slice;
	u16 ntd;
	u8 *dw_align_buf;
	dma_addr_t dw_align_buf_dma;
	struct list_head qtd_list;
	struct dwc2_host_chan *channel;
	struct list_head qh_list_entry;
	/* Descriptor-DMA mode state */
	struct dwc2_dma_desc *desc_list;
	dma_addr_t desc_list_dma;
	u32 desc_list_sz;
	u32 *n_bytes;
	struct timer_list unreserve_timer;
	struct hrtimer wait_timer;
	struct dwc2_tt *dwc_tt;
	int ttport;
	/* Single-bit flags; see kernel-doc above */
	unsigned tt_buffer_dirty:1;
	unsigned unreserve_pending:1;
	unsigned schedule_low_speed:1;
	unsigned want_wait:1;
	unsigned wait_timer_cancel:1;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * struct dwc2_qtd - Software queue transfer descriptor (QTD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * @control_phase: Current phase for control transfers (Setup, Data, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * Status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * @in_process: Indicates if this QTD is currently processed by HW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * @data_toggle: Determines the PID of the next data packet for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * data phase of control transfers. Ignored for other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * transfer types. One of the following values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * - DWC2_HC_PID_DATA0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * - DWC2_HC_PID_DATA1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * @complete_split: Keeps track of the current split type for FS/LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * endpoints on a HS Hub
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * @isoc_split_pos: Position of the ISOC split in full/low speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * @isoc_frame_index: Index of the next frame descriptor for an isochronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * transfer. A frame descriptor describes the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * position and length of the data to be transferred in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * next scheduled (micro)frame of an isochronous transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * It also holds status for that transaction. The frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * index starts at 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * @isoc_split_offset: Position of the ISOC split in the buffer for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * current frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * @ssplit_out_xfer_count: How many bytes transferred during SSPLIT OUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * @error_count: Holds the number of bus errors that have occurred for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) * a transaction within this transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * @n_desc: Number of DMA descriptors for this QTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * @isoc_frame_index_last: Last activated frame (packet) index, used in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * descriptor DMA mode only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * @num_naks: Number of NAKs received on this QTD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * @urb: URB for this transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * @qh: Queue head for this QTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) * @qtd_list_entry: For linking to the QH's list of QTDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * @isoc_td_first: Index of first activated isochronous transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) * descriptor in Descriptor DMA mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * @isoc_td_last: Index of last activated isochronous transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) * descriptor in Descriptor DMA mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * interrupt, or isochronous transfer. A single QTD is created for each URB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * (of one of these types) submitted to the HCD. The transfer associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) * a QTD may require one or multiple transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * A QTD is linked to a Queue Head, which is entered in either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * non-periodic or periodic schedule for execution. When a QTD is chosen for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * execution, some or all of its transactions may be executed. After
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * execution, the state of the QTD is updated. The QTD may be retired if all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * its transactions are complete or if an error occurred. Otherwise, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * remains in the schedule so more transactions can be executed later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) struct dwc2_qtd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) enum dwc2_control_phase control_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) u8 in_process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) u8 data_toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) u8 complete_split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) u8 isoc_split_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) u16 isoc_frame_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) u16 isoc_split_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) u16 isoc_td_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) u16 isoc_td_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) u32 ssplit_out_xfer_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) u8 error_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) u8 n_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) u16 isoc_frame_index_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) u16 num_naks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) struct dwc2_hcd_urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) struct dwc2_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) struct list_head qtd_list_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) struct hc_xfer_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) struct dwc2_hsotg *hsotg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) struct dwc2_host_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) /* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) return (struct usb_hcd *)hsotg->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * Inline used to disable one channel interrupt. Channel interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * disabled when the channel is halted or released by the interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * There is no need to handle further interrupts of that type until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * channel is re-assigned. In fact, subsequent handling may cause crashes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * because the channel structures are cleaned up when the channel is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) u32 mask = dwc2_readl(hsotg, HCINTMSK(chnum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) mask &= ~intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) dwc2_writel(hsotg, mask, HCINTMSK(chnum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) enum dwc2_halt_status halt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) struct dwc2_host_chan *chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * are read as 1, they won't clear when written back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) u32 hprt0 = dwc2_readl(hsotg, HPRT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return hprt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) return pipe->ep_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) return pipe->pipe_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) return pipe->maxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) return pipe->maxp_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) return pipe->dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) return pipe->pipe_dir == USB_DIR_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return !dwc2_hcd_is_pipe_in(pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) int dwc2_hcd_init(struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) /* Transaction Execution Functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) enum dwc2_transaction_type dwc2_hcd_select_transactions(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) enum dwc2_transaction_type tr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) /* Schedule Queue Functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) /* Implemented in hcd_queue.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct dwc2_hcd_urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) gfp_t mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) int sched_csplit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) struct dwc2_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) /* Unlinks and frees a QTD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct dwc2_qtd *qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) list_del(&qtd->qtd_list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) kfree(qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) /* Descriptor DMA support functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) struct dwc2_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) struct dwc2_host_chan *chan, int chnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) enum dwc2_halt_status halt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) gfp_t mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /* Check if QH is non-periodic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) #define dwc2_qh_is_non_per(_qh_ptr_) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) ((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) #ifdef CONFIG_USB_DWC2_DEBUG_PERIODIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static inline bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) static inline bool dbg_qh(struct dwc2_qh *qh) { return true; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) static inline bool dbg_urb(struct urb *urb) { return true; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static inline bool dbg_perio(void) { return true; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) #else /* !CONFIG_USB_DWC2_DEBUG_PERIODIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) static inline bool dbg_hc(struct dwc2_host_chan *hc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return hc->ep_type == USB_ENDPOINT_XFER_BULK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) hc->ep_type == USB_ENDPOINT_XFER_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static inline bool dbg_qh(struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) return qh->ep_type == USB_ENDPOINT_XFER_BULK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) qh->ep_type == USB_ENDPOINT_XFER_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) static inline bool dbg_urb(struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) return usb_pipetype(urb->pipe) == PIPE_BULK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) usb_pipetype(urb->pipe) == PIPE_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static inline bool dbg_perio(void) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * Returns true if frame1 index is greater than frame2 index. The comparison
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * frame number when the max index frame number is reached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) u16 diff = fr_idx1 - fr_idx2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) u16 sign = diff & (FRLISTEN_64_SIZE >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return diff && !sign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) * Returns true if frame1 is less than or equal to frame2. The comparison is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * frame number when the max frame number is reached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * Returns true if frame1 is greater than frame2. The comparison is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * number when the max frame number is reached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) return (frame1 != frame2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * Increments frame by the amount specified by inc. The addition is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) return (frame + inc) & HFNUM_MAX_FRNUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) static inline u16 dwc2_full_frame_num(u16 frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) return (frame & HFNUM_MAX_FRNUM) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) static inline u16 dwc2_micro_frame_num(u16 frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) return frame & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * Returns the Core Interrupt Status register contents, ANDed with the Core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * Interrupt Mask register contents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return dwc2_readl(hsotg, GINTSTS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) dwc2_readl(hsotg, GINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return dwc2_urb->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static inline u32 dwc2_hcd_urb_get_actual_length(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) struct dwc2_hcd_urb *dwc2_urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return dwc2_urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return dwc2_urb->error_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) static inline void dwc2_hcd_urb_set_iso_desc_params(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) u32 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) dwc2_urb->iso_descs[desc_num].offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) dwc2_urb->iso_descs[desc_num].length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static inline u32 dwc2_hcd_urb_get_iso_desc_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) struct dwc2_hcd_urb *dwc2_urb, int desc_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return dwc2_urb->iso_descs[desc_num].status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) struct dwc2_hcd_urb *dwc2_urb, int desc_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return dwc2_urb->iso_descs[desc_num].actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct dwc2_qh *qh = ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (qh && !list_empty(&qh->qh_list_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct dwc2_qh *qh = ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (!qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) return qh->host_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct dwc2_host_chan *chan, int chnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct dwc2_qtd *qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /* HCD Core API */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * dwc2_handle_hcd_intr() - Called on every hardware interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * @hsotg: The DWC2 HCD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * Returns IRQ_HANDLED if interrupt is handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * Return IRQ_NONE if interrupt is not handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * dwc2_hcd_stop() - Halts the DWC_otg host mode operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * @hsotg: The DWC2 HCD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * and 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * @hsotg: The DWC2 HCD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * dwc2_hcd_dump_state() - Dumps hsotg state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * @hsotg: The DWC2 HCD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * NOTE: This function will be removed once the peripheral controller code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * is integrated and the driver is stable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* URB interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* Transfer flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) #define URB_GIVEBACK_ASAP 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) #define URB_SEND_ZERO_PACKET 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) /* Host driver callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) void *context, gfp_t mem_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int *ttport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct dwc2_tt *dwc_tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) #endif /* __DWC2_HCD_H__ */