// SPDX-License-Identifier: GPL-2.0+
/*
 * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
 *
 * Copyright (C) 2006-2008 Barco N.V.
 *    Derived from the Cypress cy7c67200/300 ezusb linux driver and
 *    based on multiple host controller drivers inside the linux kernel.
 */

#include <linux/kthread.h>
#include <linux/slab.h>

#include "c67x00.h"
#include "c67x00-hcd.h"

/*
 * These are the stages for a control urb; they are kept
 * in both urb->interval and td->privdata.
 */
#define SETUP_STAGE		0
#define DATA_STAGE		1
#define STATUS_STAGE		2
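/*
 * A control transfer walks SETUP -> (optional) DATA -> STATUS; the stage
 * stored in urb->interval selects the branch taken in c67x00_add_ctrl_urb()
 * below when the next TD for the urb is built.
 */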

/* -------------------------------------------------------------------------- */

/*
 * struct c67x00_ep_data: Host endpoint data structure
 */
struct c67x00_ep_data {
	struct list_head queue;
	struct list_head node;
	struct usb_host_endpoint *hep;
	struct usb_device *dev;
	u16 next_frame;		/* For int/isoc transactions */
};

/*
 * struct c67x00_td
 *
 * Hardware parts are little endian, SW in CPU endianness.
 */
struct c67x00_td {
	/* HW specific part */
	__le16 ly_base_addr;	/* Bytes 0-1 */
	__le16 port_length;	/* Bytes 2-3 */
	u8 pid_ep;		/* Byte 4 */
	u8 dev_addr;		/* Byte 5 */
	u8 ctrl_reg;		/* Byte 6 */
	u8 status;		/* Byte 7 */
	u8 retry_cnt;		/* Byte 8 */
#define TT_OFFSET		2
#define TT_CONTROL		0
#define TT_ISOCHRONOUS		1
#define TT_BULK			2
#define TT_INTERRUPT		3
	u8 residue;		/* Byte 9 */
	__le16 next_td_addr;	/* Bytes 10-11 */
	/* SW part */
	struct list_head td_list;
	u16 td_addr;
	void *data;
	struct urb *urb;
	unsigned long privdata;

	/* These are needed for handling the toggle bits:
	 * an urb can be dequeued while a td is in progress;
	 * after checking the td, the toggle bit might need to
	 * be fixed */
	struct c67x00_ep_data *ep_data;
	unsigned int pipe;
};
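/*
 * Note (observation from this file, not a datasheet statement): per
 * CY_TD_SIZE (12) and c67x00_parse_td() below, the "HW specific part" above
 * mirrors the on-chip TD layout byte for byte and is copied straight out of
 * SIE memory with c67x00_ll_read_mem_le16().
 */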

struct c67x00_urb_priv {
	struct list_head hep_node;
	struct urb *urb;
	int port;
	int cnt;		/* packet number for isoc */
	int status;
	struct c67x00_ep_data *ep_data;
};

#define td_udev(td)	((td)->ep_data->dev)

#define CY_TD_SIZE		12

#define TD_PIDEP_OFFSET		0x04
#define TD_PIDEPMASK_PID	0xF0
#define TD_PIDEPMASK_EP		0x0F
#define TD_PORTLENMASK_DL	0x03FF
#define TD_PORTLENMASK_PN	0xC000

#define TD_STATUS_OFFSET	0x07
#define TD_STATUSMASK_ACK	0x01
#define TD_STATUSMASK_ERR	0x02
#define TD_STATUSMASK_TMOUT	0x04
#define TD_STATUSMASK_SEQ	0x08
#define TD_STATUSMASK_SETUP	0x10
#define TD_STATUSMASK_OVF	0x20
#define TD_STATUSMASK_NAK	0x40
#define TD_STATUSMASK_STALL	0x80

#define TD_ERROR_MASK		(TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
				 TD_STATUSMASK_STALL)

#define TD_RETRYCNT_OFFSET	0x08
#define TD_RETRYCNTMASK_ACT_FLG	0x10
#define TD_RETRYCNTMASK_TX_TYPE	0x0C
#define TD_RETRYCNTMASK_RTY_CNT	0x03

#define TD_RESIDUE_OVERFLOW	0x80

#define TD_PID_IN		0x90

/* Residue: signed 8bits, neg -> OVERFLOW, pos -> UNDERFLOW */
#define td_residue(td)		((__s8)(td->residue))
#define td_ly_base_addr(td)	(__le16_to_cpu((td)->ly_base_addr))
#define td_port_length(td)	(__le16_to_cpu((td)->port_length))
#define td_next_td_addr(td)	(__le16_to_cpu((td)->next_td_addr))

#define td_active(td)		((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
#define td_length(td)		(td_port_length(td) & TD_PORTLENMASK_DL)

#define td_sequence_ok(td)	(!td->status || \
				 (!(td->status & TD_STATUSMASK_SEQ) == \
				  !(td->ctrl_reg & SEQ_SEL)))

#define td_acked(td)		(!td->status || \
				 (td->status & TD_STATUSMASK_ACK))
#define td_actual_bytes(td)	(td_length(td) - td_residue(td))
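/*
 * Example (following the macros above): an IN TD with td_length() == 64
 * that received only 10 bytes comes back with residue == 54, so
 * td_actual_bytes() == 10; a negative residue (TD_RESIDUE_OVERFLOW bit set)
 * means the device sent more data than was requested.
 */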

/* -------------------------------------------------------------------------- */

/*
 * dbg_td - Dump the contents of the TD
 */
static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
{
	struct device *dev = c67x00_hcd_dev(c67x00);

	dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
	dev_dbg(dev, "urb: 0x%p\n", td->urb);
	dev_dbg(dev, "endpoint: %4d\n", usb_pipeendpoint(td->pipe));
	dev_dbg(dev, "pipeout: %4d\n", usb_pipeout(td->pipe));
	dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
	dev_dbg(dev, "port_length: 0x%04x\n", td_port_length(td));
	dev_dbg(dev, "pid_ep: 0x%02x\n", td->pid_ep);
	dev_dbg(dev, "dev_addr: 0x%02x\n", td->dev_addr);
	dev_dbg(dev, "ctrl_reg: 0x%02x\n", td->ctrl_reg);
	dev_dbg(dev, "status: 0x%02x\n", td->status);
	dev_dbg(dev, "retry_cnt: 0x%02x\n", td->retry_cnt);
	dev_dbg(dev, "residue: 0x%02x\n", td->residue);
	dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
	dev_dbg(dev, "data: %*ph\n", td_length(td), td->data);
}

/* -------------------------------------------------------------------------- */
/* Helper functions */

static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
{
	return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
}

/*
 * frame_add
 * Software wraparound for frame numbers.
 */
static inline u16 frame_add(u16 a, u16 b)
{
	return (a + b) & HOST_FRAME_MASK;
}

/*
 * frame_after - is frame a after frame b
 */
static inline int frame_after(u16 a, u16 b)
{
	return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
			(HOST_FRAME_MASK / 2);
}

/*
 * frame_after_eq - is frame a after or equal to frame b
 */
static inline int frame_after_eq(u16 a, u16 b)
{
	return ((HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK) <
			(HOST_FRAME_MASK / 2);
}
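/*
 * Illustration, assuming the 11-bit USB frame counter from c67x00-hcd.h
 * (HOST_FRAME_MASK == 0x07ff): frame_after(0x000, 0x7ff) is true, since 0
 * is one step past 0x7ff after wraparound; frame_after(a, a) is false; and
 * a frame half the range or more ahead (e.g. frame_after(0x400, 0x000)) is
 * not considered "after".
 */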

/* -------------------------------------------------------------------------- */

/*
 * c67x00_release_urb - remove link from all tds to this urb
 * Disconnects the urb from its tds, so that it can be given back.
 * pre: urb->hcpriv != NULL
 */
static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp;

	BUG_ON(!urb);

	c67x00->urb_count--;

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		c67x00->urb_iso_count--;
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_STD;
	}

	/* TODO this might not be so efficient when we've got many urbs!
	 * Alternatives:
	 *   * only clear when needed
	 *   * keep a list of tds with each urbp
	 */
	list_for_each_entry(td, &c67x00->td_list, td_list)
		if (urb == td->urb)
			td->urb = NULL;

	urbp = urb->hcpriv;
	urb->hcpriv = NULL;
	list_del(&urbp->hep_node);
	kfree(urbp);
}

/* -------------------------------------------------------------------------- */

static struct c67x00_ep_data *
c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct usb_host_endpoint *hep = urb->ep;
	struct c67x00_ep_data *ep_data;
	int type;

	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);

	/* Check if endpoint already has a c67x00_ep_data struct allocated */
	if (hep->hcpriv) {
		ep_data = hep->hcpriv;
		if (frame_after(c67x00->current_frame, ep_data->next_frame))
			ep_data->next_frame =
			    frame_add(c67x00->current_frame, 1);
		return hep->hcpriv;
	}

	/* Allocate and initialize a new c67x00 endpoint data structure */
	ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
	if (!ep_data)
		return NULL;

	INIT_LIST_HEAD(&ep_data->queue);
	INIT_LIST_HEAD(&ep_data->node);
	ep_data->hep = hep;

	/* hold a reference to udev as long as this endpoint lives; it may be
	 * needed to fix the data toggle */
	ep_data->dev = usb_get_dev(urb->dev);
	hep->hcpriv = ep_data;

	/* For ISOC and INT endpoints, start ASAP: */
	ep_data->next_frame = frame_add(c67x00->current_frame, 1);

	/* Add the endpoint data to one of the pipe lists; must be added
	   in order of endpoint address */
	type = usb_pipetype(urb->pipe);
	if (list_empty(&ep_data->node)) {
		list_add(&ep_data->node, &c67x00->list[type]);
	} else {
		struct c67x00_ep_data *prev;

		list_for_each_entry(prev, &c67x00->list[type], node) {
			if (prev->hep->desc.bEndpointAddress >
			    hep->desc.bEndpointAddress) {
				list_add(&ep_data->node, prev->node.prev);
				break;
			}
		}
	}

	return ep_data;
}

static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
{
	struct c67x00_ep_data *ep_data = hep->hcpriv;

	if (!ep_data)
		return 0;

	if (!list_empty(&ep_data->queue))
		return -EBUSY;

	usb_put_dev(ep_data->dev);
	list_del(&ep_data->queue);
	list_del(&ep_data->node);

	kfree(ep_data);
	hep->hcpriv = NULL;

	return 0;
}

void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;

	if (!list_empty(&ep->urb_list))
		dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");

	spin_lock_irqsave(&c67x00->lock, flags);

	/* loop waiting for all transfers in the endpoint queue to complete */
	while (c67x00_ep_data_free(ep)) {
		/* Drop the lock so we can sleep waiting for the hardware */
		spin_unlock_irqrestore(&c67x00->lock, flags);

		/* it could happen that we reinitialize this completion while
		 * somebody is still waiting for it. The timeout and the while
		 * loop handle such cases, but this might be improved */
		reinit_completion(&c67x00->endpoint_disable);
		c67x00_sched_kick(c67x00);
		wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);

		spin_lock_irqsave(&c67x00->lock, flags);
	}

	spin_unlock_irqrestore(&c67x00->lock, flags);
}

/* -------------------------------------------------------------------------- */

static inline int get_root_port(struct usb_device *dev)
{
	while (dev->parent->parent)
		dev = dev->parent;
	return dev->portnum;
}

int c67x00_urb_enqueue(struct usb_hcd *hcd,
		       struct urb *urb, gfp_t mem_flags)
{
	int ret;
	unsigned long flags;
	struct c67x00_urb_priv *urbp;
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	int port = get_root_port(urb->dev)-1;

	/* Allocate and initialize urb private data */
	urbp = kzalloc(sizeof(*urbp), mem_flags);
	if (!urbp) {
		ret = -ENOMEM;
		goto err_urbp;
	}

	spin_lock_irqsave(&c67x00->lock, flags);

	/* Make sure host controller is running */
	if (!HC_IS_RUNNING(hcd->state)) {
		ret = -ENODEV;
		goto err_not_linked;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err_not_linked;

	INIT_LIST_HEAD(&urbp->hep_node);
	urbp->urb = urb;
	urbp->port = port;

	urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);

	if (!urbp->ep_data) {
		ret = -ENOMEM;
		goto err_epdata;
	}

	/* TODO claim bandwidth with usb_claim_bandwidth?
	 * also release it somewhere! */

	urb->hcpriv = urbp;

	urb->actual_length = 0;	/* Nothing received/transmitted yet */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb->interval = SETUP_STAGE;
		break;
	case PIPE_INTERRUPT:
		break;
	case PIPE_BULK:
		break;
	case PIPE_ISOCHRONOUS:
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
		c67x00->urb_iso_count++;
		/* Assume always URB_ISO_ASAP, FIXME */
		if (list_empty(&urbp->ep_data->queue))
			urb->start_frame = urbp->ep_data->next_frame;
		else {
			/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(urbp->ep_data->queue.prev,
					      struct c67x00_urb_priv,
					      hep_node)->urb;
			urb->start_frame =
			    frame_add(last_urb->start_frame,
				      last_urb->number_of_packets *
				      last_urb->interval);
		}
		urbp->cnt = 0;
		break;
	}

	/* Add the URB to the endpoint queue */
	list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);

	/* If this is the only URB, kick start the controller */
	if (!c67x00->urb_count++)
		c67x00_ll_hpi_enable_sofeop(c67x00->sie);

	c67x00_sched_kick(c67x00);
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

err_epdata:
	usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
	spin_unlock_irqrestore(&c67x00->lock, flags);
	kfree(urbp);
err_urbp:

	return ret;
}

int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&c67x00->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(hcd, urb);

	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&c67x00->lock);

	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

done:
	spin_unlock_irqrestore(&c67x00->lock, flags);
	return rc;
}

/* -------------------------------------------------------------------------- */

/*
 * pre: c67x00 locked, urb unlocked
 */
static void
c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
{
	struct c67x00_urb_priv *urbp;

	if (!urb)
		return;

	urbp = urb->hcpriv;
	urbp->status = status;

	list_del_init(&urbp->hep_node);

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
	spin_lock(&c67x00->lock);
}

/* -------------------------------------------------------------------------- */

static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
				 int len, int periodic)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	int bit_time;

	/* According to the C67x00 BIOS user manual, page 3-18,19, the
	 * following calculations provide the full speed bit times for
	 * a transaction.
	 *
	 * FS(in)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(in,iso)	=  90.5 +  9.36*BC + HOST_DELAY
	 * FS(out)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(out,iso)	=  78.4 +  9.36*BC + HOST_DELAY
	 * LS(in)	= 802.4 + 75.78*BC + HOST_DELAY
	 * LS(out)	= 802.6 + 74.67*BC + HOST_DELAY
	 *
	 * HOST_DELAY == 106 for the c67200 and c67300.
	 */
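	/* Worked example of the formulas above (illustrative numbers only):
	 * a 64-byte full-speed bulk OUT gives 11250 + 936*64 = 71154 in
	 * 1/100 bit times, which the scaling below turns into
	 * (71154 + 50)/100 + 106 = 818 bit times including HOST_DELAY. */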

	/* make calculations in 1/100 bit times to maintain resolution */
	if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
		/* Low speed pipe */
		if (usb_pipein(urb->pipe))
			bit_time = 80240 + 7578*len;
		else
			bit_time = 80260 + 7467*len;
	} else {
		/* FS pipes */
		if (usb_pipeisoc(urb->pipe))
			bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
		else
			bit_time = 11250;
		bit_time += 936*len;
	}

	/* Scale back down to integer bit times. Use a host delay of 106.
	 * (this is the only place it is used) */
	bit_time = ((bit_time+50) / 100) + 106;

	if (unlikely(bit_time + c67x00->bandwidth_allocated >=
		     c67x00->max_frame_bw))
		return -EMSGSIZE;

	if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
		     c67x00->td_base_addr + SIE_TD_SIZE))
		return -EMSGSIZE;

	if (unlikely(c67x00->next_buf_addr + len >=
		     c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
		return -EMSGSIZE;

	if (periodic) {
		if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
			     MAX_PERIODIC_BW(c67x00->max_frame_bw)))
			return -EMSGSIZE;
		c67x00->periodic_bw_allocated += bit_time;
	}

	c67x00->bandwidth_allocated += bit_time;
	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * td_addr and buf_addr must be word aligned
 */
static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
			    void *data, int len, int pid, int toggle,
			    unsigned long privdata)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	const __u8 active_flag = 1, retry_cnt = 3;
	__u8 cmd = 0;
	int tt = 0;

	if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
				  || usb_pipeint(urb->pipe)))
		return -EMSGSIZE;	/* Not really an error, but expected */

	td = kzalloc(sizeof(*td), GFP_ATOMIC);
	if (!td)
		return -ENOMEM;

	td->pipe = urb->pipe;
	td->ep_data = urbp->ep_data;

	if ((td_udev(td)->speed == USB_SPEED_LOW) &&
	    !(c67x00->low_speed_ports & (1 << urbp->port)))
		cmd |= PREAMBLE_EN;

	switch (usb_pipetype(td->pipe)) {
	case PIPE_ISOCHRONOUS:
		tt = TT_ISOCHRONOUS;
		cmd |= ISO_EN;
		break;
	case PIPE_CONTROL:
		tt = TT_CONTROL;
		break;
	case PIPE_BULK:
		tt = TT_BULK;
		break;
	case PIPE_INTERRUPT:
		tt = TT_INTERRUPT;
		break;
	}

	if (toggle)
		cmd |= SEQ_SEL;

	cmd |= ARM_EN;

	/* SW part */
	td->td_addr = c67x00->next_td_addr;
	c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;

	/* HW part */
	td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
	td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
					(urbp->port << 14) | (len & 0x3FF));
	td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
	    (usb_pipeendpoint(td->pipe) & 0xF);
	td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
	td->ctrl_reg = cmd;
	td->status = 0;
	td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
	td->residue = 0;
	td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);

	/* SW part */
	td->data = data;
	td->urb = urb;
	td->privdata = privdata;

	c67x00->next_buf_addr += (len + 1) & ~0x01;	/* keep buf addr word aligned */
	list_add_tail(&td->td_list, &c67x00->td_list);
	return 0;
}

static inline void c67x00_release_td(struct c67x00_td *td)
{
	list_del_init(&td->td_list);
	kfree(td);
}

/* -------------------------------------------------------------------------- */

static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int remaining;
	int toggle;
	int pid;
	int ret = 0;
	int maxps;
	int need_empty;

	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			       usb_pipeout(urb->pipe));
	remaining = urb->transfer_buffer_length - urb->actual_length;

	maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	while (remaining || need_empty) {
		int len;
		char *td_buf;

		len = (remaining > maxps) ? maxps : remaining;
		if (!len)
			need_empty = 0;

		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
		    remaining;
		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
				       DATA_STAGE);
		if (ret)
			return ret;	/* td wasn't created */

		toggle ^= 1;
		remaining -= len;
		if (usb_pipecontrol(urb->pipe))
			break;
	}

	return 0;
}

/*
 * return 0 in case more bandwidth is available, else an error code
 */
static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int ret;
	int pid;

	switch (urb->interval) {
	default:
	case SETUP_STAGE:
		ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
				       8, USB_PID_SETUP, 0, SETUP_STAGE);
		if (ret)
			return ret;
		urb->interval = SETUP_STAGE;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			      usb_pipeout(urb->pipe), 1);
		break;
	case DATA_STAGE:
		if (urb->transfer_buffer_length) {
			ret = c67x00_add_data_urb(c67x00, urb);
			if (ret)
				return ret;
			break;
		}
		fallthrough;
	case STATUS_STAGE:
		pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
				       STATUS_STAGE);
		if (ret)
			return ret;
		break;
	}

	return 0;
}

/*
 * return 0 in case more bandwidth is available, else an error code
 */
static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		return c67x00_add_data_urb(c67x00, urb);
	}
	return 0;
}

static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		char *td_buf;
		int len, pid, ret;

		BUG_ON(urbp->cnt >= urb->number_of_packets);

		td_buf = urb->transfer_buffer +
		    urb->iso_frame_desc[urbp->cnt].offset;
		len = urb->iso_frame_desc[urbp->cnt].length;
		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;

		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
				       urbp->cnt);
		if (ret) {
			dev_dbg(c67x00_hcd_dev(c67x00), "create failed: %d\n",
				ret);
			urb->iso_frame_desc[urbp->cnt].actual_length = 0;
			urb->iso_frame_desc[urbp->cnt].status = ret;
			if (urbp->cnt + 1 == urb->number_of_packets)
				c67x00_giveback_urb(c67x00, urb, 0);
		}

		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		urbp->cnt++;
	}
	return 0;
}

/* -------------------------------------------------------------------------- */

static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
				  int (*add)(struct c67x00_hcd *, struct urb *))
{
	struct c67x00_ep_data *ep_data;
	struct urb *urb;

	/* traverse every endpoint on the list */
	list_for_each_entry(ep_data, &c67x00->list[type], node) {
		if (!list_empty(&ep_data->queue)) {
			/* and add the first urb */
			/* isochronous transfers rely on this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) urb = list_entry(ep_data->queue.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct c67x00_urb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) hep_node)->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) add(c67x00, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct c67x00_td *td, *ttd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /* Check if we can proceed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!list_empty(&c67x00->td_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) dev_warn(c67x00_hcd_dev(c67x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) "TD list not empty! This should not happen!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dbg_td(c67x00, td, "Unprocessed td");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) c67x00_release_td(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* Reinitialize variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) c67x00->bandwidth_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) c67x00->periodic_bw_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) c67x00->next_td_addr = c67x00->td_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) c67x00->next_buf_addr = c67x00->buf_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /* Fill the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * Read a TD, and any received IN data, back from C67X00 memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) c67x00_ll_read_mem_le16(c67x00->sie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) td->td_addr, td, CY_TD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (usb_pipein(td->pipe) && td_actual_bytes(td))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) td->data, td_actual_bytes(td));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (td->status & TD_STATUSMASK_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) dbg_td(c67x00, td, "ERROR_FLAG");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (td->status & TD_STATUSMASK_STALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* dbg_td(c67x00, td, "STALL"); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (td->status & TD_STATUSMASK_TMOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dbg_td(c67x00, td, "TIMEOUT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
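/*
 * Decide whether the data phase of a transfer is complete: the last
 * packet was empty, it was shorter than the endpoint's max packet size,
 * or all requested bytes have been transferred and no trailing
 * zero-length packet is owed (URB_ZERO_PACKET on an OUT pipe whose
 * length is a multiple of the max packet size).
 */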
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static inline int c67x00_end_of_data(struct c67x00_td *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int maxps, need_empty, remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct urb *urb = td->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) int act_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) act_bytes = td_actual_bytes(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (unlikely(!act_bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return 1; /* This was an empty packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (unlikely(act_bytes < maxps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return 1; /* Smaller than a full packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) remaining = urb->transfer_buffer_length - urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) usb_pipeout(urb->pipe) && !(remaining % maxps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (unlikely(!remaining && !need_empty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* Remove all tds from the list that come after last_td
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * and belong to the same pipe as last_td.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * This is used when a short packet has occurred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct c67x00_td *last_td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct c67x00_td *td, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) td = last_td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) tmp = last_td;
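/*
 * Walk the tds after last_td; after releasing an entry for this pipe,
 * step back to the previous surviving entry (tmp) so that the ->next
 * pointer followed on the next iteration is still valid.
 */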
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) while (td->td_list.next != &c67x00->td_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) td = list_entry(td->td_list.next, struct c67x00_td, td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (td->pipe == last_td->pipe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) c67x00_release_td(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) td = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) tmp = td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
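/*
 * Account a successfully completed td.  For control transfers the stage
 * is tracked in td->privdata/urb->interval and advances
 * SETUP -> DATA -> STATUS; the urb is given back after the status stage,
 * or when a bulk/interrupt transfer reaches its end of data.
 */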
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct c67x00_td *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct urb *urb = td->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) urb->actual_length += td_actual_bytes(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) switch (usb_pipetype(td->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* isochronous tds are handled separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) case PIPE_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) switch (td->privdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) case SETUP_STAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) urb->interval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) urb->transfer_buffer_length ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) DATA_STAGE : STATUS_STAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* Don't count setup_packet with normal data: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) urb->actual_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) case DATA_STAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (c67x00_end_of_data(td)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) urb->interval = STATUS_STAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) c67x00_clear_pipe(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) case STATUS_STAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) urb->interval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) c67x00_giveback_urb(c67x00, urb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) case PIPE_INTERRUPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) case PIPE_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (unlikely(c67x00_end_of_data(td))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) c67x00_clear_pipe(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) c67x00_giveback_urb(c67x00, urb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct urb *urb = td->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (!urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) cnt = td->privdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (td->status & TD_ERROR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) urb->error_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (cnt + 1 == urb->number_of_packets) /* Last packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) c67x00_giveback_urb(c67x00, urb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * c67x00_check_td_list - handle tds which have been processed by the c67x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * pre: current_td == 0
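 *
 * Each td is read back from the chip and dispatched: isoc tds go to
 * c67x00_handle_isoc(), errors complete the urb with an errno, NAKed,
 * out-of-sequence or unacked tds are skipped, and the rest are
 * accounted by c67x00_handle_successful_td().  The data toggle is only
 * updated for tds that were actually acked.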
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct c67x00_td *td, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) int ack_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int clear_endpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* get the TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) c67x00_parse_td(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) urb = td->urb; /* urb can be NULL! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ack_ok = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) clear_endpoint = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* Handle isochronous transfers separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (usb_pipeisoc(td->pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) clear_endpoint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) c67x00_handle_isoc(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /* When an error occurs, all tds for that pipe go into an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * inactive state. Inactive tds look just like successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * completed ones, so make sure not to service them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (td->status & TD_ERROR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) c67x00_giveback_urb(c67x00, urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) c67x00_td_to_error(c67x00, td));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) goto cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) !td_acked(td))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) goto cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Sequence ok and acked, don't need to fix toggle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ack_ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (unlikely(td->status & TD_STATUSMASK_OVF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* Overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) goto cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) clear_endpoint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) c67x00_handle_successful_td(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) cont:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (clear_endpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) c67x00_clear_pipe(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (ack_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) usb_pipeout(td->pipe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) !(td->ctrl_reg & SEQ_SEL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /* next in list could have been removed, due to clear_pipe! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) tmp = list_entry(td->td_list.next, typeof(*td), td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) c67x00_release_td(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /* If all tds are processed, we can check the previous frame (if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * there was any) and start our next frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return !c67x00_ll_husb_get_current_td(c67x00->sie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * Send a td to the C67X00: write its outgoing data (if any), then the td itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) int len = td_length(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) td->data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) c67x00_ll_write_mem_le16(c67x00->sie->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) td->td_addr, td, CY_TD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
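/*
 * Push the whole td list of the new frame to the chip: the last td gets
 * a zero next_td_addr to terminate the chain, and the SIE's current-td
 * pointer is set to the start of TD memory.
 */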
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static void c67x00_send_frame(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct c67x00_td *td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (list_empty(&c67x00->td_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dev_warn(c67x00_hcd_dev(c67x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) "%s: td list should not be empty here!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) list_for_each_entry(td, &c67x00->td_list, td_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (td->td_list.next == &c67x00->td_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) td->next_td_addr = 0; /* Last td in list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) c67x00_send_td(c67x00, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /* -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * c67x00_do_work - Scheduler's state machine
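 *
 * Runs from the scheduling tasklet: once the hardware reports that no
 * td is still being processed, completed tds are collected and, at most
 * once per frame, a new list of tds is built and handed to the chip.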
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static void c67x00_do_work(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) spin_lock(&c67x00->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Make sure all tds are processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!c67x00_all_tds_processed(c67x00))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) c67x00_check_td_list(c67x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* no td's are being processed (current == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * and all have been "checked" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) complete(&c67x00->endpoint_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (!list_empty(&c67x00->td_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (c67x00->current_frame == c67x00->last_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) goto out; /* Don't send tds in same frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) c67x00->last_frame = c67x00->current_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* If no urbs are scheduled, our work is done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (!c67x00->urb_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) c67x00_ll_hpi_disable_sofeop(c67x00->sie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) c67x00_fill_frame(c67x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (!list_empty(&c67x00->td_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* TD's have been added to the frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) c67x00_send_frame(c67x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) spin_unlock(&c67x00->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* -------------------------------------------------------------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void c67x00_sched_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct c67x00_hcd *c67x00 = from_tasklet(c67x00, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) c67x00_do_work(c67x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) void c67x00_sched_kick(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) tasklet_hi_schedule(&c67x00->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) tasklet_setup(&c67x00->tasklet, c67x00_sched_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) tasklet_kill(&c67x00->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
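
/*
 * Usage sketch (not compiled): how the entry points above are typically
 * wired up by the rest of the driver.  The real call sites live in the
 * other c67x00 files; the wrapper names below are made up for
 * illustration, only the c67x00_sched_* calls are real.
 */
#if 0
static int example_start(struct c67x00_hcd *c67x00)
{
	/* Set up the scheduling tasklet once per HCD instance */
	return c67x00_sched_start_scheduler(c67x00);
}

static void example_irq(struct c67x00_hcd *c67x00)
{
	/* Defer frame processing out of interrupt context */
	c67x00_sched_kick(c67x00);
}

static void example_stop(struct c67x00_hcd *c67x00)
{
	/* Make sure the tasklet will not run again */
	c67x00_sched_stop_scheduler(c67x00);
}
#endif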