// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the NXP ISP1760 chip
 *
 * However, the code might contain some bugs. What doesn't work for sure is:
 * - ISO
 * - OTG
 * The interrupt line is configured as active low, level.
 *
 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
 *
 * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
 *
 */
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>

#include "isp1760-core.h"
#include "isp1760-hcd.h"
#include "isp1760-regs.h"

static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
static struct kmem_cache *urb_listitem_cachep;

typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
                struct isp1760_qtd *qtd);

static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
{
        return *(struct isp1760_hcd **)hcd->hcd_priv;
}
/* urb state */
#define DELETE_URB (0x0008)
#define NO_TRANSFER_ACTIVE (0xffffffff)

/* Philips Proprietary Transfer Descriptor (PTD) */
typedef __u32 __bitwise __dw;
struct ptd {
        __dw dw0;
        __dw dw1;
        __dw dw2;
        __dw dw3;
        __dw dw4;
        __dw dw5;
        __dw dw6;
        __dw dw7;
};
#define PTD_OFFSET 0x0400
#define ISO_PTD_OFFSET 0x0400
#define INT_PTD_OFFSET 0x0800
#define ATL_PTD_OFFSET 0x0c00
#define PAYLOAD_OFFSET 0x1000


/* ATL */
/* DW0 */
#define DW0_VALID_BIT 1
#define FROM_DW0_VALID(x) ((x) & 0x01)
#define TO_DW0_LENGTH(x) (((u32) x) << 3)
#define TO_DW0_MAXPACKET(x) (((u32) x) << 18)
#define TO_DW0_MULTI(x) (((u32) x) << 29)
#define TO_DW0_ENDPOINT(x) (((u32) x) << 31)
/* DW1 */
#define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3)
#define TO_DW1_PID_TOKEN(x) (((u32) x) << 10)
#define DW1_TRANS_BULK ((u32) 2 << 12)
#define DW1_TRANS_INT ((u32) 3 << 12)
#define DW1_TRANS_SPLIT ((u32) 1 << 14)
#define DW1_SE_USB_LOSPEED ((u32) 2 << 16)
#define TO_DW1_PORT_NUM(x) (((u32) x) << 18)
#define TO_DW1_HUB_NUM(x) (((u32) x) << 25)
/* DW2 */
#define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8)
#define TO_DW2_RL(x) ((x) << 25)
#define FROM_DW2_RL(x) (((x) >> 25) & 0xf)
/* DW3 */
#define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff)
#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff)
#define TO_DW3_NAKCOUNT(x) ((x) << 19)
#define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf)
#define TO_DW3_CERR(x) ((x) << 23)
#define FROM_DW3_CERR(x) (((x) >> 23) & 0x3)
#define TO_DW3_DATA_TOGGLE(x) ((x) << 25)
#define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1)
#define TO_DW3_PING(x) ((x) << 26)
#define FROM_DW3_PING(x) (((x) >> 26) & 0x1)
#define DW3_ERROR_BIT (1 << 28)
#define DW3_BABBLE_BIT (1 << 29)
#define DW3_HALT_BIT (1 << 30)
#define DW3_ACTIVE_BIT (1 << 31)
#define FROM_DW3_ACTIVE(x) (((x) >> 31) & 0x01)

#define INT_UNDERRUN (1 << 2)
#define INT_BABBLE (1 << 1)
#define INT_EXACT (1 << 0)

#define SETUP_PID (2)
#define IN_PID (1)
#define OUT_PID (0)

/* Errata 1 */
#define RL_COUNTER (0)
#define NAK_COUNTER (0)
#define ERR_COUNTER (2)
struct isp1760_qtd {
        u8 packet_type;
        void *data_buffer;
        u32 payload_addr;

        /* the rest is HCD-private */
        struct list_head qtd_list;
        struct urb *urb;
        size_t length;
        size_t actual_length;

        /* QTD_ENQUEUED: waiting for transfer (inactive) */
        /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
        /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
           interrupt handler may touch this qtd! */
        /* QTD_XFER_COMPLETE: payload has been transferred successfully */
        /* QTD_RETIRE: transfer error/abort qtd */
#define QTD_ENQUEUED 0
#define QTD_PAYLOAD_ALLOC 1
#define QTD_XFER_STARTED 2
#define QTD_XFER_COMPLETE 3
#define QTD_RETIRE 4
        u32 status;
};

/* Queue head, one for each active endpoint */
struct isp1760_qh {
        struct list_head qh_list;
        struct list_head qtd_list;
        u32 toggle;
        u32 ping;
        int slot;
        int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
};

struct urb_listitem {
        struct list_head urb_list;
        struct urb *urb;
};
/*
 * Access functions for isp176x registers (addresses 0..0x03FF).
 */
static u32 reg_read32(void __iomem *base, u32 reg)
{
        return isp1760_read32(base, reg);
}

static void reg_write32(void __iomem *base, u32 reg, u32 val)
{
        isp1760_write32(base, reg, val);
}

/*
 * Access functions for isp176x memory (offset >= 0x0400).
 *
 * bank_reads8() reads memory locations prefetched by an earlier write to
 * HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
 * bank optimizations, you should use the more generic mem_reads8() below.
 *
 * For access to ptd memory, use the specialized ptd_read() and ptd_write()
 * below.
 *
 * These functions copy data to/from the device via MMIO. memcpy_{to|from}io()
 * doesn't quite work because some platforms have to enforce 32-bit access.
 */
static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
                __u32 *dst, u32 bytes)
{
        __u32 __iomem *src;
        u32 val;
        __u8 *src_byteptr;
        __u8 *dst_byteptr;

        src = src_base + (bank_addr | src_offset);

        if (src_offset < PAYLOAD_OFFSET) {
                while (bytes >= 4) {
                        *dst = le32_to_cpu(__raw_readl(src));
                        bytes -= 4;
                        src++;
                        dst++;
                }
        } else {
                while (bytes >= 4) {
                        *dst = __raw_readl(src);
                        bytes -= 4;
                        src++;
                        dst++;
                }
        }

        if (!bytes)
                return;

        /* in case we have 3, 2 or 1 bytes left. The dst buffer may not be
         * fully allocated.
         */
        if (src_offset < PAYLOAD_OFFSET)
                val = le32_to_cpu(__raw_readl(src));
        else
                val = __raw_readl(src);

        dst_byteptr = (void *) dst;
        src_byteptr = (void *) &val;
        while (bytes > 0) {
                *dst_byteptr = *src_byteptr;
                dst_byteptr++;
                src_byteptr++;
                bytes--;
        }
}

static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
                u32 bytes)
{
        reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
        ndelay(90);
        bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
}

static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
                __u32 const *src, u32 bytes)
{
        __u32 __iomem *dst;

        dst = dst_base + dst_offset;

        if (dst_offset < PAYLOAD_OFFSET) {
                while (bytes >= 4) {
                        __raw_writel(cpu_to_le32(*src), dst);
                        bytes -= 4;
                        src++;
                        dst++;
                }
        } else {
                while (bytes >= 4) {
                        __raw_writel(*src, dst);
                        bytes -= 4;
                        src++;
                        dst++;
                }
        }

        if (!bytes)
                return;
        /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
         * extra bytes should not be read by the HW.
         */

        if (dst_offset < PAYLOAD_OFFSET)
                __raw_writel(cpu_to_le32(*src), dst);
        else
                __raw_writel(*src, dst);
}

/*
 * Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
 * INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
 */
static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
                struct ptd *ptd)
{
        reg_write32(base, HC_MEMORY_REG,
                        ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
        ndelay(90);
        bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
                        (void *) ptd, sizeof(*ptd));
}

static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
                struct ptd *ptd)
{
        mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
                        &ptd->dw1, 7*sizeof(ptd->dw1));
        /* Make sure dw0 gets written last (after other dw's and after payload)
           since it contains the enable bit */
        wmb();
        mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
                        sizeof(ptd->dw0));
}

/* memory management of the 60 KiB on the chip from 0x1000 to 0xffff */
static void init_memory(struct isp1760_hcd *priv)
{
        int i, curr;
        u32 payload_addr;

        payload_addr = PAYLOAD_OFFSET;
        for (i = 0; i < BLOCK_1_NUM; i++) {
                priv->memory_pool[i].start = payload_addr;
                priv->memory_pool[i].size = BLOCK_1_SIZE;
                priv->memory_pool[i].free = 1;
                payload_addr += priv->memory_pool[i].size;
        }

        curr = i;
        for (i = 0; i < BLOCK_2_NUM; i++) {
                priv->memory_pool[curr + i].start = payload_addr;
                priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
                priv->memory_pool[curr + i].free = 1;
                payload_addr += priv->memory_pool[curr + i].size;
        }

        curr = i + curr;
        for (i = 0; i < BLOCK_3_NUM; i++) {
                priv->memory_pool[curr + i].start = payload_addr;
                priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
                priv->memory_pool[curr + i].free = 1;
                payload_addr += priv->memory_pool[curr + i].size;
        }

        WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
}

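/*
 * Claim a free chip memory block large enough for the qtd's payload and
 * store its start address in qtd->payload_addr; if no suitable block is
 * free, payload_addr is left at zero.
 */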
static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
        struct isp1760_hcd *priv = hcd_to_priv(hcd);
        int i;

        WARN_ON(qtd->payload_addr);

        if (!qtd->length)
                return;

        for (i = 0; i < BLOCKS; i++) {
                if (priv->memory_pool[i].size >= qtd->length &&
                                priv->memory_pool[i].free) {
                        priv->memory_pool[i].free = 0;
                        qtd->payload_addr = priv->memory_pool[i].start;
                        return;
                }
        }
}

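/*
 * Return the chip memory block backing qtd->payload_addr to the pool and
 * clear payload_addr; complains if the address does not match any block.
 */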
static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
        struct isp1760_hcd *priv = hcd_to_priv(hcd);
        int i;

        if (!qtd->payload_addr)
                return;

        for (i = 0; i < BLOCKS; i++) {
                if (priv->memory_pool[i].start == qtd->payload_addr) {
                        WARN_ON(priv->memory_pool[i].free);
                        priv->memory_pool[i].free = 1;
                        qtd->payload_addr = 0;
                        return;
                }
        }

        dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
                __func__, qtd->payload_addr);
        WARN_ON(1);
        qtd->payload_addr = 0;
}

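/*
 * Poll a register until (value & mask) == done or the timeout (in
 * microseconds) expires; returns -ENODEV if the register reads back as all
 * ones, which means the device is gone.
 */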
static int handshake(struct usb_hcd *hcd, u32 reg,
                u32 mask, u32 done, int usec)
{
        u32 result;
        int ret;

        ret = readl_poll_timeout_atomic(hcd->regs + reg, result,
                        ((result & mask) == done ||
                         result == U32_MAX), 1, usec);
        if (result == U32_MAX)
                return -ENODEV;

        return ret;
}

/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct usb_hcd *hcd)
{
        struct isp1760_hcd *priv = hcd_to_priv(hcd);

        u32 command = reg_read32(hcd->regs, HC_USBCMD);

        command |= CMD_RESET;
        reg_write32(hcd->regs, HC_USBCMD, command);
        hcd->state = HC_STATE_HALT;
        priv->next_statechange = jiffies;

        return handshake(hcd, HC_USBCMD, CMD_RESET, 0, 250 * 1000);
}

static struct isp1760_qh *qh_alloc(gfp_t flags)
{
        struct isp1760_qh *qh;

        qh = kmem_cache_zalloc(qh_cachep, flags);
        if (!qh)
                return NULL;

        INIT_LIST_HEAD(&qh->qh_list);
        INIT_LIST_HEAD(&qh->qtd_list);
        qh->slot = -1;

        return qh;
}

static void qh_free(struct isp1760_qh *qh)
{
        WARN_ON(!list_empty(&qh->qtd_list));
        WARN_ON(qh->slot > -1);
        kmem_cache_free(qh_cachep, qh);
}

/* one-time init, only for memory state */
static int priv_init(struct usb_hcd *hcd)
{
        struct isp1760_hcd *priv = hcd_to_priv(hcd);
        u32 hcc_params;
        int i;

        spin_lock_init(&priv->lock);

        for (i = 0; i < QH_END; i++)
                INIT_LIST_HEAD(&priv->qh_list[i]);

        /*
         * hw default: 1K periodic list heads, one per frame.
         * periodic_size can shrink by USBCMD update if hcc_params allows.
         */
        priv->periodic_size = DEFAULT_I_TDPS;

        /* controllers may cache some of the periodic schedule ... */
        hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
        /* full frame cache */
        if (HCC_ISOC_CACHE(hcc_params))
                priv->i_thresh = 8;
        else /* N microframes cached */
                priv->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

        return 0;
}

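/*
 * Bring up the host controller: verify the chip responds via the scratch
 * register test, mark all PTD slots as skipped, reset the controller through
 * the EHCI USBCMD register, pulse ALL_ATX_RESET, enable the HC interrupts
 * and finish with priv_init().
 */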
static int isp1760_hc_setup(struct usb_hcd *hcd)
{
        struct isp1760_hcd *priv = hcd_to_priv(hcd);
        int result;
        u32 scratch, hwmode;

        reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
        /* Change bus pattern */
        scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
        scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
        if (scratch != 0xdeadbabe) {
                dev_err(hcd->self.controller, "Scratch test failed.\n");
                return -ENODEV;
        }

        /*
         * The RESET_HC bit in the SW_RESET register is supposed to reset the
         * host controller without touching the CPU interface registers, but at
         * least on the ISP1761 it seems to behave as the RESET_ALL bit and
         * reset the whole device. We thus can't use it here, so let's reset
         * the host controller through the EHCI USB Command register. The device
         * has been reset in core code anyway, so this shouldn't matter.
         */
        reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
        reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
        reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
        reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);

        result = ehci_reset(hcd);
        if (result)
                return result;

        /* Step 11 passed */

        /* ATL reset */
        hwmode = reg_read32(hcd->regs, HC_HW_MODE_CTRL) & ~ALL_ATX_RESET;
        reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
        mdelay(10);
        reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);

        reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);

        priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);

        return priv_init(hcd);
}

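/*
 * Convert a payload address in local memory space to the form used in the
 * PTD data start address field: offset from the PTD area base (0x400), in
 * units of 8 bytes.
 */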
static u32 base_to_chip(u32 base)
{
        return ((base - 0x400) >> 3);
}

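/* Return true if this qtd is the last one belonging to its URB in the qh's qtd list. */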
static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
{
        struct urb *urb;

        if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
                return 1;

        urb = qtd->urb;
        qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
        return (qtd->urb != urb);
}

/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */

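/*
 * Fill in the PTD for an asynchronous (control/bulk) transfer from the queue
 * head and qtd state; create_ptd_int() reuses this and then adds the
 * periodic scheduling fields.
 */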
static void create_ptd_atl(struct isp1760_qh *qh,
                struct isp1760_qtd *qtd, struct ptd *ptd)
{
        u32 maxpacket;
        u32 multi;
        u32 rl = RL_COUNTER;
        u32 nak = NAK_COUNTER;

        memset(ptd, 0, sizeof(*ptd));

        /* according to 3.6.2, max packet len can not be > 0x400 */
        maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
                        usb_pipeout(qtd->urb->pipe));
        multi = 1 + ((maxpacket >> 11) & 0x3);
        maxpacket &= 0x7ff;

        /* DW0 */
        ptd->dw0 = DW0_VALID_BIT;
        ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
        ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
        ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));

        /* DW1 */
        ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
        ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
        ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);

        if (usb_pipebulk(qtd->urb->pipe))
                ptd->dw1 |= DW1_TRANS_BULK;
        else if (usb_pipeint(qtd->urb->pipe))
                ptd->dw1 |= DW1_TRANS_INT;

        if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
                /* split transaction */

                ptd->dw1 |= DW1_TRANS_SPLIT;
                if (qtd->urb->dev->speed == USB_SPEED_LOW)
                        ptd->dw1 |= DW1_SE_USB_LOSPEED;

                ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
                ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);

                /* SE bit for Split INT transfers */
                if (usb_pipeint(qtd->urb->pipe) &&
                                (qtd->urb->dev->speed == USB_SPEED_LOW))
                        ptd->dw1 |= 2 << 16;

                rl = 0;
                nak = 0;
        } else {
                ptd->dw0 |= TO_DW0_MULTI(multi);
                if (usb_pipecontrol(qtd->urb->pipe) ||
                                usb_pipebulk(qtd->urb->pipe))
                        ptd->dw3 |= TO_DW3_PING(qh->ping);
        }
        /* DW2 */
        ptd->dw2 = 0;
        ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
        ptd->dw2 |= TO_DW2_RL(rl);

        /* DW3 */
        ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
        ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
        if (usb_pipecontrol(qtd->urb->pipe)) {
                if (qtd->data_buffer == qtd->urb->setup_packet)
                        ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
                else if (last_qtd_of_urb(qtd, qh))
                        ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
        }

        ptd->dw3 |= DW3_ACTIVE_BIT;
        /* Cerr */
        ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
}

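/*
 * Turn a PTD prepared by create_ptd_atl() into an interrupt PTD by filling
 * in the polling period (dw2) and the uframe start/complete-split masks
 * (dw4/dw5).
 */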
static void transform_add_int(struct isp1760_qh *qh,
                struct isp1760_qtd *qtd, struct ptd *ptd)
{
        u32 usof;
        u32 period;

        /*
         * Most of this is guessing. ISP1761 datasheet is quite unclear, and
         * the algorithm from the original Philips driver code, which was
         * pretty much used in this driver before as well, is quite horrendous
         * and, I believe, incorrect. The code below follows the datasheet and
         * USB2.0 spec as far as I can tell, and plug/unplug seems to be much
         * more reliable this way (fingers crossed...).
         */

        if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
                /* urb->interval is in units of microframes (1/8 ms) */
                period = qtd->urb->interval >> 3;

                if (qtd->urb->interval > 4)
                        usof = 0x01; /* One bit set =>
                                        interval 1 ms * uFrame-match */
                else if (qtd->urb->interval > 2)
                        usof = 0x22; /* Two bits set => interval 1/2 ms */
                else if (qtd->urb->interval > 1)
                        usof = 0x55; /* Four bits set => interval 1/4 ms */
                else
                        usof = 0xff; /* All bits set => interval 1/8 ms */
        } else {
                /* urb->interval is in units of frames (1 ms) */
                period = qtd->urb->interval;
                usof = 0x0f; /* Execute Start Split on any of the
                                four first uFrames */

                /*
                 * First 8 bits in dw5 is uSCS and "specifies which uSOF the
                 * complete split needs to be sent. Valid only for IN." Also,
                 * "All bits can be set to one for every transfer." (p 82,
                 * ISP1761 data sheet.) 0x1c is from Philips driver. Where did
                 * that number come from? 0xff seems to work fine...
                 */
                /* ptd->dw5 = 0x1c; */
                ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
        }

        period = period >> 1; /* Ensure equal or shorter period than requested */
        period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */

        ptd->dw2 |= period;
        ptd->dw4 = usof;
}

static void create_ptd_int(struct isp1760_qh *qh,
                struct isp1760_qtd *qtd, struct ptd *ptd)
{
        create_ptd_atl(qh, qtd, ptd);
        transform_add_int(qh, qtd, ptd);
}

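/*
 * Finish an URB: flush cached IN data, unlink the URB from its endpoint and
 * give it back to the USB core. Called with priv->lock held; the lock is
 * dropped around usb_hcd_giveback_urb() since complete() can reenter the HCD.
 */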
static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
__releases(priv->lock)
__acquires(priv->lock)
{
        struct isp1760_hcd *priv = hcd_to_priv(hcd);

        if (!urb->unlinked) {
                if (urb->status == -EINPROGRESS)
                        urb->status = 0;
        }

        if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
                void *ptr;
                for (ptr = urb->transfer_buffer;
                     ptr < urb->transfer_buffer + urb->transfer_buffer_length;
                     ptr += PAGE_SIZE)
                        flush_dcache_page(virt_to_page(ptr));
        }

        /* complete() can reenter this HCD */
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&priv->lock);
        usb_hcd_giveback_urb(hcd, urb, urb->status);
        spin_lock(&priv->lock);
}

static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
                u8 packet_type)
{
        struct isp1760_qtd *qtd;

        qtd = kmem_cache_zalloc(qtd_cachep, flags);
        if (!qtd)
                return NULL;

        INIT_LIST_HEAD(&qtd->qtd_list);
        qtd->urb = urb;
        qtd->packet_type = packet_type;
        qtd->status = QTD_ENQUEUED;
        qtd->actual_length = 0;

        return qtd;
}

static void qtd_free(struct isp1760_qtd *qtd)
{
        WARN_ON(qtd->payload_addr);
        kmem_cache_free(qtd_cachep, qtd);
}

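/*
 * Write a prepared PTD into the given ATL or INT slot, record the slot
 * ownership in 'slots', and clear the slot's bit in the skip map so the
 * controller starts processing it.
 */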
static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
                struct isp1760_slotinfo *slots,
                struct isp1760_qtd *qtd, struct isp1760_qh *qh,
                struct ptd *ptd)
{
        struct isp1760_hcd *priv = hcd_to_priv(hcd);
        int skip_map;

        WARN_ON((slot < 0) || (slot > 31));
        WARN_ON(qtd->length && !qtd->payload_addr);
        WARN_ON(slots[slot].qtd);
        WARN_ON(slots[slot].qh);
        WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);

        /* Make sure done map has not triggered from some unlinked transfer */
        if (ptd_offset == ATL_PTD_OFFSET) {
                priv->atl_done_map |= reg_read32(hcd->regs,
                                HC_ATL_PTD_DONEMAP_REG);
                priv->atl_done_map &= ~(1 << slot);
        } else {
                priv->int_done_map |= reg_read32(hcd->regs,
                                HC_INT_PTD_DONEMAP_REG);
                priv->int_done_map &= ~(1 << slot);
        }

        qh->slot = slot;
        qtd->status = QTD_XFER_STARTED;
        slots[slot].timestamp = jiffies;
        slots[slot].qtd = qtd;
        slots[slot].qh = qh;
        ptd_write(hcd->regs, ptd_offset, slot, ptd);

        if (ptd_offset == ATL_PTD_OFFSET) {
                skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
                skip_map &= ~(1 << qh->slot);
                reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
        } else {
                skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
                skip_map &= ~(1 << qh->slot);
                reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
        }
}

static int is_short_bulk(struct isp1760_qtd *qtd)
{
        return (usb_pipebulk(qtd->urb->pipe) &&
                (qtd->actual_length < qtd->length));
}

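/*
 * Reap finished qtds from a queue head: copy IN payloads back from chip
 * memory, release payload blocks, and collect completed URBs on 'urb_list'
 * so they can be given back after the caller drops the private lock.
 */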
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct list_head *urb_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) int last_qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct isp1760_qtd *qtd, *qtd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct urb_listitem *urb_listitem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (qtd->status < QTD_XFER_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) last_qtd = last_qtd_of_urb(qtd, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if ((!last_qtd) && (qtd->status == QTD_RETIRE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) qtd_next->status = QTD_RETIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (qtd->status == QTD_XFER_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (qtd->actual_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) switch (qtd->packet_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) case IN_PID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) mem_reads8(hcd->regs, qtd->payload_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) qtd->data_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) qtd->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) case OUT_PID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) qtd->urb->actual_length +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) qtd->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) case SETUP_PID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (is_short_bulk(qtd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) qtd->urb->status = -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (!last_qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) qtd_next->status = QTD_RETIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (qtd->payload_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) free_mem(hcd, qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (last_qtd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if ((qtd->status == QTD_RETIRE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) (qtd->urb->status == -EINPROGRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) qtd->urb->status = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Defer calling of urb_done() since it releases lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (unlikely(!urb_listitem))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) break; /* Try again on next call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) urb_listitem->urb = qtd->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) list_add_tail(&urb_listitem->urb_list, urb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) list_del(&qtd->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) qtd_free(qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
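/*
* Feed qtds for one endpoint to the chip: allocate payload memory, copy
* SETUP/OUT data into it, and start a transfer in a free PTD slot if this
* qh does not already own one. At most ENQUEUE_DEPTH qtds are prepared per
* call (see the scheduling comment in schedule_ptds()).
*/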
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) #define ENQUEUE_DEPTH 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int ptd_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct isp1760_slotinfo *slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) int curr_slot, free_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct ptd ptd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct isp1760_qtd *qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (unlikely(list_empty(&qh->qtd_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* Make sure this endpoint's TT buffer is clean before queueing ptds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (qh->tt_buffer_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) qtd_list)->urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ptd_offset = INT_PTD_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) slots = priv->int_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) ptd_offset = ATL_PTD_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) slots = priv->atl_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) free_slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) for (curr_slot = 0; curr_slot < 32; curr_slot++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) free_slot = curr_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (slots[curr_slot].qh == qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (qtd->status == QTD_ENQUEUED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) WARN_ON(qtd->payload_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) alloc_mem(hcd, qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if ((qtd->length) && (!qtd->payload_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if ((qtd->length) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ((qtd->packet_type == SETUP_PID) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) (qtd->packet_type == OUT_PID))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) mem_writes8(hcd->regs, qtd->payload_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) qtd->data_buffer, qtd->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) qtd->status = QTD_PAYLOAD_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (qtd->status == QTD_PAYLOAD_ALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if ((curr_slot > 31) && (free_slot == -1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) dev_dbg(hcd->self.controller, "%s: No slot "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) "available for transfer\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* Start xfer for this endpoint if not already done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if ((curr_slot > 31) && (free_slot > -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (usb_pipeint(qtd->urb->pipe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) create_ptd_int(qh, qtd, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) create_ptd_atl(qh, qtd, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) start_bus_transfer(hcd, ptd_offset, free_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) slots, qtd, qh, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) curr_slot = free_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (n >= ENQUEUE_DEPTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
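/*
* Main scheduling pass: reap completed qtds on every qh, complete finished
* urbs, then enqueue new transfers according to the priority scheme
* described below.
*/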
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static void schedule_ptds(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct isp1760_hcd *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct isp1760_qh *qh, *qh_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct list_head *ep_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) LIST_HEAD(urb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct urb_listitem *urb_listitem, *urb_listitem_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (!hcd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * check finished/retired xfers, transfer payloads, call urb_done()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) for (i = 0; i < QH_END; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ep_queue = &priv->qh_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) collect_qtds(hcd, qh, &urb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (list_empty(&qh->qtd_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) list_del(&qh->qh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) urb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) isp1760_urb_done(hcd, urb_listitem->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) kmem_cache_free(urb_listitem_cachep, urb_listitem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * Schedule packets for transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * According to USB2.0 specification:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * 1st prio: interrupt xfers, up to 80 % of bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * 2nd prio: control xfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * 3rd prio: bulk xfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *
* ... but let's use a simpler scheme here (mostly because the ISP1761
* doc is very unclear on how to prioritize traffic):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * 1) Enqueue any queued control transfers, as long as payload chip mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * and PTD ATL slots are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * 2) Enqueue any queued INT transfers, as long as payload chip mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * and PTD INT slots are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * 3) Enqueue any queued bulk transfers, as long as payload chip mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * and PTD ATL slots are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * conservation of chip mem and performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * I'm sure this scheme could be improved upon!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) for (i = 0; i < QH_END; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ep_queue = &priv->qh_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) enqueue_qtds(hcd, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
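/*
* Outcome of check_int_transfer()/check_atl_transfer(): the qtd completed,
* the PTD should be reloaded and retried, or the whole urb must be retired
* because of a fatal error.
*/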
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) #define PTD_STATE_QTD_DONE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) #define PTD_STATE_QTD_RELOAD 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) #define PTD_STATE_URB_RETIRE 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) __dw dw4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
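/*
* The loop below treats dw4 bits 8 and up as eight 3-bit status fields,
* one per uFrame of the transfer.
*/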
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) dw4 = ptd->dw4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) dw4 >>= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) need to handle these errors? Is it done in hardware? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (ptd->dw3 & DW3_HALT_BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) urb->status = -EPROTO; /* Default unknown error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) switch (dw4 & 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) case INT_UNDERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) dev_dbg(hcd->self.controller, "%s: underrun "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) "during uFrame %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) __func__, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) urb->status = -ECOMM; /* Could not write data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) case INT_EXACT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dev_dbg(hcd->self.controller, "%s: transaction "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) "error during uFrame %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) __func__, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) urb->status = -EPROTO; /* timeout, bad CRC, PID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) error etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) case INT_BABBLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) dev_dbg(hcd->self.controller, "%s: babble "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) "error during uFrame %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) __func__, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) urb->status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) dw4 >>= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return PTD_STATE_URB_RETIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return PTD_STATE_QTD_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) WARN_ON(!ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (ptd->dw3 & DW3_HALT_BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (ptd->dw3 & DW3_BABBLE_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) urb->status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) else if (FROM_DW3_CERR(ptd->dw3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) urb->status = -EPIPE; /* Stall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) urb->status = -EPROTO; /* Unknown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) dev_dbg(hcd->self.controller, "%s: ptd error:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return PTD_STATE_URB_RETIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* Transfer Error, *but* active and no HALT -> reload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return PTD_STATE_QTD_RELOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
* NAKs are handled in HW by the chip, usually because the
* device is not able to send data fast enough.
* This happens mostly on slower hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return PTD_STATE_QTD_RELOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return PTD_STATE_QTD_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
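/*
* Handle every PTD marked in the INT/ATL done maps: read the PTD back from
* chip memory, decide whether its qtd completed, needs a reload or retires
* the whole urb, release the slot and restart the next qtd where needed.
* A final schedule_ptds() call picks up the resulting work.
*/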
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static void handle_done_ptds(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct ptd ptd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct isp1760_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct isp1760_slotinfo *slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) u32 ptd_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct isp1760_qtd *qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int modified;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) int skip_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) priv->int_done_map &= ~skip_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) priv->atl_done_map &= ~skip_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) modified = priv->int_done_map || priv->atl_done_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) while (priv->int_done_map || priv->atl_done_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (priv->int_done_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* INT ptd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) slot = __ffs(priv->int_done_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) priv->int_done_map &= ~(1 << slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) slots = priv->int_slots;
/* This should not trigger, and could be removed if
no one has any problems with it triggering: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!slots[slot].qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) ptd_offset = INT_PTD_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) state = check_int_transfer(hcd, &ptd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) slots[slot].qtd->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* ATL ptd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) slot = __ffs(priv->atl_done_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) priv->atl_done_map &= ~(1 << slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) slots = priv->atl_slots;
/* This should not trigger, and could be removed if
no one has any problems with it triggering: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (!slots[slot].qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) ptd_offset = ATL_PTD_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) state = check_atl_transfer(hcd, &ptd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) slots[slot].qtd->urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) qtd = slots[slot].qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) slots[slot].qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) qh = slots[slot].qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) slots[slot].qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) qh->slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) WARN_ON(qtd->status != QTD_XFER_STARTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) case PTD_STATE_QTD_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if ((usb_pipeint(qtd->urb->pipe)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) (qtd->urb->dev->speed != USB_SPEED_HIGH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) qtd->actual_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) qtd->actual_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) qtd->status = QTD_XFER_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) is_short_bulk(qtd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) qtd = list_entry(qtd->qtd_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) typeof(*qtd), qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) qh->ping = FROM_DW3_PING(ptd.dw3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) qtd->status = QTD_PAYLOAD_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) ptd.dw0 |= DW0_VALID_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* RL counter = ERR counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ptd.dw3 &= ~TO_DW3_CERR(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) qh->ping = FROM_DW3_PING(ptd.dw3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) case PTD_STATE_URB_RETIRE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) qtd->status = QTD_RETIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) (qtd->urb->status != -EPIPE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) (qtd->urb->status != -EREMOTEIO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) qh->tt_buffer_dirty = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (usb_hub_clear_tt_buffer(qtd->urb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* Clear failed; let's hope things work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) anyway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) qh->tt_buffer_dirty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) qh->toggle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) qh->ping = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (slots == priv->int_slots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (state == PTD_STATE_QTD_RELOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) dev_err(hcd->self.controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) "%s: PTD_STATE_QTD_RELOAD on "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) "interrupt packet\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (state != PTD_STATE_QTD_RELOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) create_ptd_int(qh, qtd, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (state != PTD_STATE_QTD_RELOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) create_ptd_atl(qh, qtd, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) qh, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (modified)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) schedule_ptds(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
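/*
* Interrupt handler: acknowledge the controller interrupt, latch the INT
* and ATL done maps and process the completed PTDs, all under priv->lock.
*/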
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) u32 imask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) irqreturn_t irqret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (!(hcd->state & HC_STATE_RUNNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (unlikely(!imask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) handle_done_ptds(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) irqret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) leave:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return irqret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * Workaround for problem described in chip errata 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * One solution suggested in the errata is to use SOF interrupts _instead_of_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * ATL done interrupts (the "instead of" might be important since it seems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * to set the PTD's done bit in addition to not generating an interrupt!).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * done bit is not being set. This is bad - it blocks the endpoint until reboot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * If we use SOF interrupts only, we get latency between ptd completion and the
* actual handling. This is very noticeable in testusb runs, which take several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * minutes longer without ATL interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * completed and its done map bit is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * not to cause too much lag when this HW bug occurs, while still hopefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * ensuring that the check does not falsely trigger.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #define SLOT_TIMEOUT 300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) #define SLOT_CHECK_PERIOD 200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static struct timer_list errata2_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static struct usb_hcd *errata2_timer_hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void errata2_function(struct timer_list *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct usb_hcd *hcd = errata2_timer_hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct ptd ptd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) unsigned long spinflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) spin_lock_irqsave(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) for (slot = 0; slot < 32; slot++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (priv->atl_slots[slot].qh && time_after(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) priv->atl_slots[slot].timestamp +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) msecs_to_jiffies(SLOT_TIMEOUT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (!FROM_DW0_VALID(ptd.dw0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) !FROM_DW3_ACTIVE(ptd.dw3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) priv->atl_done_map |= 1 << slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (priv->atl_done_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) handle_done_ptds(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) spin_unlock_irqrestore(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) add_timer(&errata2_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
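/*
* Bring the controller to the running state: program the PTD interrupt
* AND/OR masks, set CMD_RUN, write FLAG_CF (under the EHCI port-reset
* rwsem), arm the errata2 timer and initialize the PTD skip map and
* last-PTD registers.
*/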
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static int isp1760_run(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) u32 command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) u32 chipid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) hcd->uses_new_polling = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) hcd->state = HC_STATE_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* Set PTD interrupt AND & OR maps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /* step 23 passed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) command = reg_read32(hcd->regs, HC_USBCMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) command &= ~(CMD_LRESET|CMD_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) command |= CMD_RUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) reg_write32(hcd->regs, HC_USBCMD, command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * XXX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * Spec says to write FLAG_CF as last config action, priv code grabs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * the semaphore while doing so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) down_write(&ehci_cf_port_reset_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) up_write(&ehci_cf_port_reset_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) errata2_timer_hcd = hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) timer_setup(&errata2_timer, errata2_function, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) add_timer(&errata2_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) chipid & 0xffff, chipid >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* PTD Register Init Part 2, Step 28 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* Setup registers controlling PTD checking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ATL_BUF_FILL | INT_BUF_FILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* GRR this is run-once init(), being done every time the HC starts.
* So long as they're part of class devices, we can't do it in init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * since the class device isn't created that early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) qtd->data_buffer = databuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (len > MAX_PAYLOAD_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) len = MAX_PAYLOAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) qtd->length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return qtd->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static void qtd_list_free(struct list_head *qtd_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct isp1760_qtd *qtd, *qtd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) list_del(&qtd->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) qtd_free(qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * Packetize urb->transfer_buffer into list of packets of size wMaxPacketSize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * Also calculate the PID type (SETUP/IN/OUT) for each packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */
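/* wMaxPacketSize bits 10:0 hold the packet size; the upper bits carry the
* high-bandwidth multiplier (additional transactions per uFrame) and are
* masked off here. */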
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static void packetize_urb(struct usb_hcd *hcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct urb *urb, struct list_head *head, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct isp1760_qtd *qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) int len, maxpacketsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) u8 packet_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * URBs map to sequences of QTDs: one logical transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!urb->transfer_buffer && urb->transfer_buffer_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* XXX This looks like usb storage / SCSI bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) dev_err(hcd->self.controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) "buf is null, dma is %08lx len is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) (long unsigned)urb->transfer_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) urb->transfer_buffer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (usb_pipein(urb->pipe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) packet_type = IN_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) packet_type = OUT_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (usb_pipecontrol(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) qtd = qtd_alloc(flags, urb, SETUP_PID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (!qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) list_add_tail(&qtd->qtd_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /* for zero length DATA stages, STATUS is always IN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (urb->transfer_buffer_length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) packet_type = IN_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) usb_pipeout(urb->pipe)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * buffer gets wrapped in one or more qtds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * last one may be "short" (including zero len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * and may serve as a control status ack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) buf = urb->transfer_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) len = urb->transfer_buffer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) int this_qtd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) qtd = qtd_alloc(flags, urb, packet_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (!qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) this_qtd_len = qtd_fill(qtd, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) list_add_tail(&qtd->qtd_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) len -= this_qtd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) buf += this_qtd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (len <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * control requests may need a terminating data "status" ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * bulk ones may need a terminating short packet (zero length).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (urb->transfer_buffer_length != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) int one_more = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (usb_pipecontrol(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) one_more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (packet_type == IN_PID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) packet_type = OUT_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) packet_type = IN_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) } else if (usb_pipebulk(urb->pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) && (urb->transfer_flags & URB_ZERO_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) && !(urb->transfer_buffer_length %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) maxpacketsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) one_more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (one_more) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) qtd = qtd_alloc(flags, urb, packet_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (!qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* never any data in such packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) qtd_fill(qtd, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) list_add_tail(&qtd->qtd_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) qtd_list_free(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
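/*
* Queue an urb: pick the qh list matching the pipe type, split the urb into
* qtds, attach them to the endpoint's qh (allocating and linking a new qh
* if the endpoint has none yet) and let schedule_ptds() start the transfer.
*/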
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct list_head *ep_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct isp1760_qh *qh, *qhit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) unsigned long spinflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) LIST_HEAD(new_qtds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) int qh_in_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) switch (usb_pipetype(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) case PIPE_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) ep_queue = &priv->qh_list[QH_CONTROL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) case PIPE_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ep_queue = &priv->qh_list[QH_BULK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) case PIPE_INTERRUPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (urb->interval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /* FIXME: Check bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ep_queue = &priv->qh_list[QH_INTERRUPT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) case PIPE_ISOCHRONOUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) dev_err(hcd->self.controller, "%s: isochronous USB packets "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) "not yet supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) dev_err(hcd->self.controller, "%s: unknown pipe type\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (usb_pipein(urb->pipe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) urb->actual_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) packetize_urb(hcd, urb, &new_qtds, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (list_empty(&new_qtds))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) spin_lock_irqsave(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) retval = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) qtd_list_free(&new_qtds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) retval = usb_hcd_link_urb_to_ep(hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) qtd_list_free(&new_qtds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) qh = urb->ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) qh_in_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) list_for_each_entry(qhit, ep_queue, qh_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (qhit == qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) qh_in_queue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (!qh_in_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) list_add_tail(&qh->qh_list, ep_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) qh = qh_alloc(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (!qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) usb_hcd_unlink_urb_from_ep(hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) qtd_list_free(&new_qtds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) list_add_tail(&qh->qh_list, ep_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) urb->ep->hcpriv = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) list_splice_tail(&new_qtds, &qh->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) schedule_ptds(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) spin_unlock_irqrestore(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) struct isp1760_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) int skip_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) WARN_ON(qh->slot == -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* We need to forcefully reclaim the slot since some transfers never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return, e.g. interrupt transfers and NAKed bulk transfers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) skip_map |= (1 << qh->slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) priv->atl_slots[qh->slot].qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) priv->atl_slots[qh->slot].qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) skip_map |= (1 << qh->slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) priv->int_slots[qh->slot].qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) priv->int_slots[qh->slot].qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) qh->slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * Retire the qtds beginning at 'qtd' and belonging all to the same urb, killing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * any active transfer belonging to the urb in the process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct isp1760_qtd *qtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int urb_was_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) urb = qtd->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) urb_was_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (qtd->urb != urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (qtd->status >= QTD_XFER_STARTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) urb_was_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (last_qtd_of_urb(qtd, qh) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) (qtd->status >= QTD_XFER_COMPLETE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) urb_was_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (qtd->status == QTD_XFER_STARTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) kill_transfer(hcd, urb, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) qtd->status = QTD_RETIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) qh->tt_buffer_dirty = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (usb_hub_clear_tt_buffer(urb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /* Clear failed; let's hope things work anyway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) qh->tt_buffer_dirty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
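/*
 * hc_driver .urb_dequeue callback: mark all qtds belonging to the urb for
 * retirement (killing any transfer already started on the chip) and let
 * schedule_ptds() give the urb back.
 */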
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) unsigned long spinflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct isp1760_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct isp1760_qtd *qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) spin_lock_irqsave(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) retval = usb_hcd_check_unlink_urb(hcd, urb, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) qh = urb->ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (!qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (qtd->urb == urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) dequeue_urb_from_qtd(hcd, qh, qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) list_move(&qtd->qtd_list, &qh->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) urb->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) schedule_ptds(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) spin_unlock_irqrestore(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
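/*
 * hc_driver .endpoint_disable callback: all urbs for this endpoint have
 * already been given back, so unlink the qh from its queue and free it.
 */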
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) static void isp1760_endpoint_disable(struct usb_hcd *hcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) unsigned long spinflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct isp1760_qh *qh, *qh_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) spin_lock_irqsave(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) qh = ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) WARN_ON(!list_empty(&qh->qtd_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
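/*
 * The qh may sit on any of the per-type queues; setting i to QH_END
 * terminates the outer loop once it has been found and unlinked.
 */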
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) for (i = 0; i < QH_END; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (qh_iter == qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) list_del(&qh_iter->qh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) i = QH_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) qh_free(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) ep->hcpriv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) schedule_ptds(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) spin_unlock_irqrestore(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
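/*
 * Report root-hub port changes to usbcore: returns non-zero and sets bit 1
 * of *buf when the root port has a connect-status change (or a finished
 * resume) pending.
 */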
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) u32 temp, status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) int retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /* if !PM, root hub timers won't get shut down ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (!HC_IS_RUNNING(hcd->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /* init status to no-changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) buf[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) mask = PORT_CSC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) temp = reg_read32(hcd->regs, HC_PORTSC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (temp & PORT_OWNER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (temp & PORT_CSC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) temp &= ~PORT_CSC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) reg_write32(hcd->regs, HC_PORTSC1, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * Return status information even for ports with OWNER set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * Otherwise hub_wq wouldn't see the disconnect event when a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * high-speed device is switched over to the companion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * controller by the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if ((temp & mask) != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) || ((temp & PORT_RESUME) != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) && time_after_eq(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) priv->reset_done))) {
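/* bit 0 of the change bitmap is the hub itself, bit 1 is port 1 */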
buf[0] |= 1 << (0 + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) status = STS_PCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) /* FIXME autosuspend idle root hubs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return status ? retval : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
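/*
 * Fill in a USB 2.0 hub descriptor for the root hub, deriving the port
 * count and power-switching mode from the HCSPARAMS capability value.
 */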
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct usb_hub_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) int ports = HCS_N_PORTS(priv->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) u16 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) desc->bDescriptorType = USB_DT_HUB;
/* EHCI 1.0, section 2.3.9 says 20 ms max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) desc->bPwrOn2PwrGood = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) desc->bHubContrCurrent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) desc->bNbrPorts = ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) temp = 1 + (ports / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) desc->bDescLength = 7 + 2 * temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) /* per-port overcurrent reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) temp = HUB_CHAR_INDV_PORT_OCPM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (HCS_PPC(priv->hcs_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* per-port power control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) temp |= HUB_CHAR_INDV_PORT_LPSM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* no power switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) temp |= HUB_CHAR_NO_LPSM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) desc->wHubCharacteristics = cpu_to_le16(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
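/*
 * Called after a port reset: if the port did not come up enabled, the
 * attached device is full speed, so hand the port over to the companion
 * controller by setting PORT_OWNER.
 */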
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) static int check_reset_complete(struct usb_hcd *hcd, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) int port_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (!(port_status & PORT_CONNECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) return port_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) /* if reset finished and it's still not enabled -- handoff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (!(port_status & PORT_PE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) dev_info(hcd->self.controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) "port %d full speed --> companion\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) port_status |= PORT_OWNER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) port_status &= ~PORT_RWC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) reg_write32(hcd->regs, HC_PORTSC1, port_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
} else {
dev_info(hcd->self.controller, "port %d high speed\n",
index + 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return port_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
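/*
 * Handle hub-class control requests for the root hub; port features are
 * implemented EHCI-style by manipulating the single PORTSC register.
 */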
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) u16 wValue, u16 wIndex, char *buf, u16 wLength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) int ports = HCS_N_PORTS(priv->hcs_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) u32 temp, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * HCS_INDICATOR may say we can change LEDs to off/amber/green.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * (track current state ourselves) ... blink for diagnostics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * power, "this is the one", etc. EHCI spec supports this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) switch (typeReq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) case ClearHubFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) case C_HUB_LOCAL_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) case C_HUB_OVER_CURRENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* no hub-wide feature/status flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) case ClearPortFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) if (!wIndex || wIndex > ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) wIndex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) temp = reg_read32(hcd->regs, HC_PORTSC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /*
* Even if OWNER is set, i.e. the port is owned by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * companion controller, hub_wq needs to be able to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * the port-change status bits (especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * USB_PORT_STAT_C_CONNECTION).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) case USB_PORT_FEAT_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) case USB_PORT_FEAT_C_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) /* XXX error? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) case USB_PORT_FEAT_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (temp & PORT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (temp & PORT_SUSPEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if ((temp & PORT_PE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) goto error;
/* resume signaling for at least USB_RESUME_TIMEOUT ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) temp &= ~(PORT_RWC_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) reg_write32(hcd->regs, HC_PORTSC1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) temp | PORT_RESUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) priv->reset_done = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) msecs_to_jiffies(USB_RESUME_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) case USB_PORT_FEAT_C_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /* we auto-clear this feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) case USB_PORT_FEAT_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (HCS_PPC(priv->hcs_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) reg_write32(hcd->regs, HC_PORTSC1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) temp & ~PORT_POWER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) case USB_PORT_FEAT_C_CONNECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) case USB_PORT_FEAT_C_OVER_CURRENT:
/* XXX error? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) case USB_PORT_FEAT_C_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /* GetPortStatus clears reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
reg_read32(hcd->regs, HC_USBCMD); /* unblock posted writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) case GetHubDescriptor:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) case GetHubStatus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* no hub-wide feature/status flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) memset(buf, 0, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) case GetPortStatus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (!wIndex || wIndex > ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) wIndex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) temp = reg_read32(hcd->regs, HC_PORTSC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /* wPortChange bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (temp & PORT_CSC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) status |= USB_PORT_STAT_C_CONNECTION << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* whoever resumes must GetPortStatus to complete it!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (temp & PORT_RESUME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) dev_err(hcd->self.controller, "Port resume should be skipped.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) /* Remote Wakeup received? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (!priv->reset_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /* resume signaling for 20 msec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) priv->reset_done = jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) + msecs_to_jiffies(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /* check the port again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) mod_timer(&hcd->rh_timer, priv->reset_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) /* resume completed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) else if (time_after_eq(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) priv->reset_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) status |= USB_PORT_STAT_C_SUSPEND << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) priv->reset_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /* stop resume signaling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) temp = reg_read32(hcd->regs, HC_PORTSC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) reg_write32(hcd->regs, HC_PORTSC1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) temp & ~(PORT_RWC_BITS | PORT_RESUME));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) retval = handshake(hcd, HC_PORTSC1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) PORT_RESUME, 0, 2000 /* 2msec */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (retval != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) dev_err(hcd->self.controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) "port %d resume error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) wIndex + 1, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /* whoever resets must GetPortStatus to complete it!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if ((temp & PORT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) && time_after_eq(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) priv->reset_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) status |= USB_PORT_STAT_C_RESET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) priv->reset_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /* force reset to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
/*
 * REVISIT: some hardware needs 550+ usec to clear this bit;
 * seems too long to spin routinely...
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) retval = handshake(hcd, HC_PORTSC1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) PORT_RESET, 0, 750);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (retval != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) dev_err(hcd->self.controller, "port %d reset error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) wIndex + 1, retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /* see what we found out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) temp = check_reset_complete(hcd, wIndex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) reg_read32(hcd->regs, HC_PORTSC1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * Even if OWNER is set, there's no harm letting hub_wq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) * see the wPortStatus values (they should all be 0 except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * for PORT_POWER anyway).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (temp & PORT_OWNER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) dev_err(hcd->self.controller, "PORT_OWNER is set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (temp & PORT_CONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) status |= USB_PORT_STAT_CONNECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /* status may be from integrated TT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) status |= USB_PORT_STAT_HIGH_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (temp & PORT_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) status |= USB_PORT_STAT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (temp & (PORT_SUSPEND|PORT_RESUME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) status |= USB_PORT_STAT_SUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (temp & PORT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) status |= USB_PORT_STAT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (temp & PORT_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) status |= USB_PORT_STAT_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) put_unaligned(cpu_to_le32(status), (__le32 *) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) case SetHubFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) case C_HUB_LOCAL_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) case C_HUB_OVER_CURRENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /* no hub-wide feature/status flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) case SetPortFeature:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) wIndex &= 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (!wIndex || wIndex > ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) wIndex--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) temp = reg_read32(hcd->regs, HC_PORTSC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (temp & PORT_OWNER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) /* temp &= ~PORT_RWC_BITS; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) switch (wValue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) case USB_PORT_FEAT_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) case USB_PORT_FEAT_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if ((temp & PORT_PE) == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) || (temp & PORT_RESET) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_SUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) case USB_PORT_FEAT_POWER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (HCS_PPC(priv->hcs_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) reg_write32(hcd->regs, HC_PORTSC1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) temp | PORT_POWER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) case USB_PORT_FEAT_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (temp & PORT_RESUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) goto error;
/*
 * Line status bits may report this as low speed, which can be
 * fine if this root hub has a transaction translator built in.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) && PORT_USB11(temp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) temp |= PORT_OWNER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) temp |= PORT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) temp &= ~PORT_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * caller must wait, then call GetPortStatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * usb 2.0 spec says 50 ms resets on root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) priv->reset_done = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) msecs_to_jiffies(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) reg_write32(hcd->regs, HC_PORTSC1, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
reg_read32(hcd->regs, HC_USBCMD); /* unblock posted writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /* "stall" on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) retval = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
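/*
 * HC_FRINDEX counts micro-frames (eight per millisecond frame), so shift
 * right by three to convert it to a frame number.
 */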
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) static int isp1760_get_frame(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) u32 fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) fr = reg_read32(hcd->regs, HC_FRINDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) return (fr >> 3) % priv->periodic_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
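/*
 * hc_driver .stop callback: power down the root port, reset the controller,
 * mask the global interrupt and clear CONFIGFLAG.
 */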
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) static void isp1760_stop(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) del_timer(&errata2_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) spin_lock_irq(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) ehci_reset(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) /* Disable IRQ */
temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
temp &= ~HW_GLOBAL_INTR_EN;
reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) spin_unlock_irq(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
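/*
 * hc_driver .shutdown callback: stop the controller, then make sure the
 * global interrupt is masked and the run bit is cleared.
 */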
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) static void isp1760_shutdown(struct usb_hcd *hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) u32 command, temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) isp1760_stop(hcd);
temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
temp &= ~HW_GLOBAL_INTR_EN;
reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) command = reg_read32(hcd->regs, HC_USBCMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) command &= ~CMD_RUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) reg_write32(hcd->regs, HC_USBCMD, command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
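/*
 * Completion callback for usb_hub_clear_tt_buffer(): the TT buffer has been
 * cleared, so transfers for this endpoint may be scheduled again.
 */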
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) struct isp1760_hcd *priv = hcd_to_priv(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) struct isp1760_qh *qh = ep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) unsigned long spinflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) spin_lock_irqsave(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) qh->tt_buffer_dirty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) schedule_ptds(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) spin_unlock_irqrestore(&priv->lock, spinflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) static const struct hc_driver isp1760_hc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) .description = "isp1760-hcd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) .product_desc = "NXP ISP1760 USB Host Controller",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) .hcd_priv_size = sizeof(struct isp1760_hcd *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) .irq = isp1760_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) .flags = HCD_MEMORY | HCD_USB2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) .reset = isp1760_hc_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) .start = isp1760_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) .stop = isp1760_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) .shutdown = isp1760_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) .urb_enqueue = isp1760_urb_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .urb_dequeue = isp1760_urb_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) .endpoint_disable = isp1760_endpoint_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) .get_frame_number = isp1760_get_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) .hub_status_data = isp1760_hub_status_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) .hub_control = isp1760_hub_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) .clear_tt_buffer_complete = isp1760_clear_tt_buffer_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
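/*
 * Create the slab caches used for qtds, qhs and urb list items (called once
 * at module init, hence __init).
 */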
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) int __init isp1760_init_kmem_once(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) SLAB_MEM_SPREAD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (!urb_listitem_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) qtd_cachep = kmem_cache_create("isp1760_qtd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) SLAB_MEM_SPREAD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (!qtd_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (!qh_cachep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) kmem_cache_destroy(qtd_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) void isp1760_deinit_kmem_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) kmem_cache_destroy(qtd_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) kmem_cache_destroy(qh_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) kmem_cache_destroy(urb_listitem_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) int isp1760_hcd_register(struct isp1760_hcd *priv, void __iomem *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) struct resource *mem, int irq, unsigned long irqflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) struct usb_hcd *hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) hcd = usb_create_hcd(&isp1760_hc_driver, dev, dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (!hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) *(struct isp1760_hcd **)hcd->hcd_priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) priv->hcd = hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) init_memory(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) hcd->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) hcd->regs = regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) hcd->rsrc_start = mem->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) hcd->rsrc_len = resource_size(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) /* This driver doesn't support wakeup requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) hcd->cant_recv_wakeups = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) ret = usb_add_hcd(hcd, irq, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) device_wakeup_enable(hcd->self.controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) usb_put_hcd(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) void isp1760_hcd_unregister(struct isp1760_hcd *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!priv->hcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) usb_remove_hcd(priv->hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) usb_put_hcd(priv->hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }