^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2004-2013 Synopsys, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * 1. Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * notice, this list of conditions, and the following disclaimer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * without modification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * 2. Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * notice, this list of conditions and the following disclaimer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * documentation and/or other materials provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * 3. The names of the above-listed copyright holders may not be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * to endorse or promote products derived from this software without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * ALTERNATIVELY, this software may be distributed under the terms of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * GNU General Public License ("GPL") as published by the Free Software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Foundation; either version 2 of the License, or (at your option) any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * later version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * This file contains the functions to manage Queue Heads and Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * Transfer Descriptors for Host mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/gcd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/usb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/usb/hcd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include <linux/usb/ch11.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include "core.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include "hcd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /* Wait this long before releasing periodic reservation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /* If we get a NAK, wait this long before retrying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * dwc2_periodic_channel_available() - Checks that a channel is available for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * periodic transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * Return: 0 if successful, negative error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * Currently assuming that there is a dedicated host channel for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * each periodic transaction plus at least one host channel for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * non-periodic transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) int num_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) num_channels = hsotg->params.host_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) if ((hsotg->periodic_channels + hsotg->non_periodic_channels <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) num_channels) && (hsotg->periodic_channels < num_channels - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) dev_dbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) "%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) __func__, num_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) hsotg->periodic_channels, hsotg->non_periodic_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) status = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * for the specified QH in the periodic schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * @qh: QH containing periodic bandwidth required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * Return: 0 if successful, negative error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * For simplicity, this calculation assumes that all the transfers in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * periodic schedule may occur in the same (micro)frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) s16 max_claimed_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * High speed mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * Max periodic usecs is 80% x 125 usec = 100 usec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) max_claimed_usecs = 100 - qh->host_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * Full speed mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * Max periodic usecs is 90% x 1000 usec = 900 usec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) max_claimed_usecs = 900 - qh->host_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if (hsotg->periodic_usecs > max_claimed_usecs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) dev_err(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) "%s: already claimed usecs %d, required usecs %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) __func__, hsotg->periodic_usecs, qh->host_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) status = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * pmap_schedule() - Schedule time in a periodic bitmap (pmap).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * @map: The bitmap representing the schedule; will be updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * upon success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * @bits_per_period: The schedule represents several periods. This is how many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * bits are in each period. It's assumed that the beginning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * of the schedule will repeat after its end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * @periods_in_map: The number of periods in the schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * @num_bits: The number of bits we need per period we want to reserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * in this function call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * @interval: How often we need to be scheduled for the reservation this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * time. 1 means every period. 2 means every other period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * ...you get the picture?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * @start: The bit number to start at. Normally 0. Must be within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * the interval or we return failure right away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * @only_one_period: Normally we'll allow picking a start anywhere within the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * first interval, since we can still make all repetition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * requirements by doing that. However, if you pass true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * here then we'll return failure if we can't fit within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * the period that "start" is in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * The idea here is that we want to schedule time for repeating events that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * want the same resource. The resource is divided into fixed-sized periods
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * and the events want to repeat every "interval" periods. The schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * granularity is one bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * To keep things "simple", we'll represent our schedule with a bitmap that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * contains a fixed number of periods. This gets rid of a lot of complexity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * but does mean that we need to handle things specially (and non-ideally) if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * the number of the periods in the schedule doesn't match well with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * intervals that we're trying to schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * Here's an explanation of the scheme we'll implement, assuming 8 periods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * - If interval is 1, we need to take up space in each of the 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * periods we're scheduling. Easy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * - If interval is 2, we need to take up space in half of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * periods. Again, easy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * - If interval is 3, we actually need to fall back to interval 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * Why? Because we might need time in any period. AKA for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * first 8 periods, we'll be in slot 0, 3, 6. Then we'll be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * in slot 1, 4, 7. Then we'll be in 2, 5. Then we'll be back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * 0, 3, and 6. Since we could be in any frame we need to reserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * for all of them. Sucks, but that's what you gotta do. Note that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * if we were instead scheduling 8 * 3 = 24 we'd do much better, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * then we need more memory and time to do scheduling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * - If interval is 4, easy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * - If interval is 5, we again need interval 1. The schedule will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * 0, 5, 2, 7, 4, 1, 6, 3, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * - If interval is 6, we need interval 2. 0, 6, 4, 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * - If interval is 7, we need interval 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) * - If interval is 8, we need interval 8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * If you do the math, you'll see that we need to pretend that interval is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) * equal to the greatest_common_divisor(interval, periods_in_map).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) * Note that at the moment this function tends to front-pack the schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * In some cases that's really non-ideal (it's hard to schedule things that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * need to repeat every period). In other cases it's perfect (you can easily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * schedule bigger, less often repeating things).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * Here's the algorithm in action (8 periods, 5 bits per period):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * |*****| ***|*****| ***|*****| ***|*****| ***| OK 3 bits, intv 3 at 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * |*****|* ***|*****| ***|*****|* ***|*****| ***| OK 1 bits, intv 4 at 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * |** |* |** | |** |* |** | | Remv 3 bits, intv 3 at 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * |*** |* |*** | |*** |* |*** | | OK 1 bits, intv 6 at 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * |**** |* * |**** | * |**** |* * |**** | * | OK 1 bits, intv 1 at 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * |**** |**** |**** | *** |**** |**** |**** | *** | OK 2 bits, intv 2 at 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 1 at 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * |*****|*****|*****| ****|*****|*****|*****| ****| FAIL 1 bits, intv 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * | ***|*****| ***| ****| ***|*****| ***| ****| Remv 2 bits, intv 2 at 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * | ***| ****| ***| ****| ***| ****| ***| ****| Remv 1 bits, intv 4 at 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * | **| ****| **| ****| **| ****| **| ****| Remv 1 bits, intv 6 at 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * | *| ** *| *| ** *| *| ** *| *| ** *| Remv 1 bits, intv 1 at 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * | *| *| *| *| *| *| *| *| Remv 2 bits, intv 2 at 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * | | | | | | | | | Remv 1 bits, intv 1 at 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * |*** | |** | |*** | |** | | OK 1 bits, intv 4 at 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * |*****| |** **| |*****| |** **| | OK 2 bits, intv 2 at 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * |*****|* |** **| |*****|* |** **| | OK 1 bits, intv 4 at 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * |*****|*** |** **| ** |*****|*** |** **| ** | OK 2 bits, intv 2 at 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) * |*****|*****|** **| ****|*****|*****|** **| ****| OK 2 bits, intv 2 at 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 4 at 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * This function is pretty generic and could be easily abstracted if anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * needed similar scheduling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * Returns either -ENOSPC or a >= 0 start bit which should be passed to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * unschedule routine. The map bitmap will be updated on a non-error result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) */
static int pmap_schedule(unsigned long *map, int bits_per_period,
			 int periods_in_map, int num_bits,
			 int interval, int start, bool only_one_period)
{
	int interval_bits;	/* Total bits spanned by one interval */
	int to_reserve;		/* Number of repetitions we must reserve */
	int first_end;		/* Upper bound for where the first rep may end */
	int i;

	/* A single reservation can never be bigger than one whole period */
	if (num_bits > bits_per_period)
		return -ENOSPC;

	/* Adjust interval as per description */
	interval = gcd(interval, periods_in_map);

	interval_bits = bits_per_period * interval;
	to_reserve = periods_in_map / interval;

	/* If start has gotten us past interval then we can't schedule */
	if (start >= interval_bits)
		return -ENOSPC;

	if (only_one_period)
		/* Must fit within same period as start; end at begin of next */
		first_end = (start / bits_per_period + 1) * bits_per_period;
	else
		/* Can fit anywhere in the first interval */
		first_end = interval_bits;

	/*
	 * We'll try to pick the first repetition, then see if that time
	 * is free for each of the subsequent repetitions.  If it's not
	 * we'll adjust the start time for the next search of the first
	 * repetition.
	 */
	while (start + num_bits <= first_end) {
		int end;

		/* Need to stay within this period */
		end = (start / bits_per_period + 1) * bits_per_period;

		/* Look for num_bits free in this period starting at start */
		start = bitmap_find_next_zero_area(map, end, start, num_bits,
						   0);

		/*
		 * We should get start >= end if we fail.  We might be
		 * able to check the next microframe depending on the
		 * interval, so continue on (start already updated).
		 */
		if (start >= end) {
			start = end;
			continue;
		}

		/* At this point we have a valid point for first one */
		for (i = 1; i < to_reserve; i++) {
			int ith_start = start + interval_bits * i;
			int ith_end = end + interval_bits * i;
			int ret;

			/* Use this as a dumb "check if bits are 0" */
			ret = bitmap_find_next_zero_area(
				map, ith_start + num_bits, ith_start, num_bits,
				0);

			/* We got the right place, continue checking */
			if (ret == ith_start)
				continue;

			/* Move start up for next time and exit for loop */
			ith_start = bitmap_find_next_zero_area(
				map, ith_end, ith_start, num_bits, 0);
			if (ith_start >= ith_end)
				/* Need a whole new period next time */
				start = end;
			else
				start = ith_start - interval_bits * i;
			break;
		}

		/* If didn't exit the for loop with a break, we have success */
		if (i == to_reserve)
			break;
	}

	/* Ran out of room in the first interval/period: nothing fits */
	if (start + num_bits > first_end)
		return -ENOSPC;

	/* Success: claim the chosen slot in every affected period */
	for (i = 0; i < to_reserve; i++) {
		int ith_start = start + interval_bits * i;

		bitmap_set(map, ith_start, num_bits);
	}

	return start;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * pmap_unschedule() - Undo work done by pmap_schedule()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * @map: See pmap_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * @bits_per_period: See pmap_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) * @periods_in_map: See pmap_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * @num_bits: The number of bits that was passed to schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * @interval: The interval that was passed to schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * @start: The return value from pmap_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) */
static void pmap_unschedule(unsigned long *map, int bits_per_period,
			    int periods_in_map, int num_bits,
			    int interval, int start)
{
	int stride;
	int count;
	int i;

	/* Adjust interval as per description in pmap_schedule() */
	interval = gcd(interval, periods_in_map);

	/* One repetition was set every "stride" bits; clear each of them */
	stride = bits_per_period * interval;
	count = periods_in_map / interval;

	for (i = 0; i < count; i++)
		bitmap_clear(map, start + stride * i, num_bits);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) * dwc2_get_ls_map() - Get the map used for the given qh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * We'll always get the periodic map out of our TT. Note that even if we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * running the host straight in low speed / full speed mode it appears as if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) * a TT is allocated for us, so we'll use it. If that ever changes we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * add logic here to get a map out of "hsotg" if !qh->do_split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * Returns: the map or NULL if a map couldn't be found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) unsigned long *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) /* Don't expect to be missing a TT and be doing low speed scheduling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) if (WARN_ON(!qh->dwc_tt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /* Get the map and adjust if this is a multi_tt hub */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) map = qh->dwc_tt->periodic_bitmaps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (qh->dwc_tt->usb_tt->multi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) #ifdef DWC2_PRINT_SCHEDULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * cat_printf() - A printf() + strcat() helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * This is useful for concatenating a bunch of strings where each string is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * constructed using printf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * @buf: The destination buffer; will be updated to point after the printed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * @size: The number of bytes in the buffer (includes space for '\0').
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * @fmt: The format for printf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * @...: The args for printf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) static __printf(3, 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) void cat_printf(char **buf, size_t *size, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) if (*size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) i = vsnprintf(*buf, *size, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) if (i >= *size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) (*buf)[*size - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) *buf += *size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) *size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) *buf += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) *size -= i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * pmap_print() - Print the given periodic map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * Will attempt to print out the periodic schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * @map: See pmap_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * @bits_per_period: See pmap_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * @periods_in_map: See pmap_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) * @period_name: The name of 1 period, like "uFrame"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) * @units: The name of the units, like "us".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * @print_fn: The function to call for printing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * @print_data: Opaque data to pass to the print function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) static void pmap_print(unsigned long *map, int bits_per_period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) int periods_in_map, const char *period_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) const char *units,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) void (*print_fn)(const char *str, void *data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) void *print_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) int period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) for (period = 0; period < periods_in_map; period++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) char tmp[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) char *buf = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) size_t buf_size = sizeof(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) int period_start = period * bits_per_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) int period_end = period_start + bits_per_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) int start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) bool printed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) for (i = period_start; i < period_end + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) /* Handle case when ith bit is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (i < period_end &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) bitmap_find_next_zero_area(map, i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) i, 1, 0) != i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) start = i - period_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) /* ith bit isn't set; don't care if count == 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (!printed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) cat_printf(&buf, &buf_size, "%s %d: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) period_name, period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) cat_printf(&buf, &buf_size, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) printed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) cat_printf(&buf, &buf_size, "%d %s -%3d %s", start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) units, start + count - 1, units);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (printed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) print_fn(tmp, print_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) struct dwc2_qh_print_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) struct dwc2_hsotg *hsotg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct dwc2_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * dwc2_qh_print() - Helper function for dwc2_qh_schedule_print()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * @str: The string to print
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * @data: A pointer to a struct dwc2_qh_print_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) static void dwc2_qh_print(const char *str, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) struct dwc2_qh_print_data *print_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * dwc2_qh_schedule_print() - Print the periodic schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * @qh: QH to print.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) struct dwc2_qh_print_data print_data = { hsotg, qh };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * The printing functions are quite slow and inefficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * If we don't have tracing turned on, don't run unless the special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * define is turned on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (qh->schedule_low_speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) unsigned long *map = dwc2_get_ls_map(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) qh, qh->device_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) DWC2_ROUND_US_TO_SLICE(qh->device_us),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) dwc2_sch_dbg(hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) "QH=%p Whole low/full speed map %p now:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) qh, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) dwc2_qh_print, &print_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) for (i = 0; i < qh->num_hs_transfers; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) int uframe = trans_time->start_schedule_us /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) DWC2_HS_PERIODIC_US_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) int rel_us = trans_time->start_schedule_us %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) DWC2_HS_PERIODIC_US_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) dwc2_sch_dbg(hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) qh, i, trans_time->duration_us, uframe, rel_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) if (qh->num_hs_transfers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) pmap_print(hsotg->hs_periodic_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) DWC2_HS_PERIODIC_US_PER_UFRAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) dwc2_qh_print, &print_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct dwc2_qh *qh) {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * dwc2_ls_pmap_schedule() - Schedule a low speed QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * @search_slice: We'll start trying to schedule at the passed slice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * Remember that slices are the units of the low speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * schedule (think 25us or so).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) * Wraps pmap_schedule() with the right parameters for low speed scheduling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) * Normally we schedule low speed devices on the map associated with the TT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) * Returns: 0 for success or an error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) int search_slice)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) unsigned long *map = dwc2_get_ls_map(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) int slice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * Schedule on the proper low speed map with our low speed scheduling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * parameters. Note that we use the "device_interval" here since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * we want the low speed interval and the only way we'd be in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * function is if the device is low speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * If we happen to be doing low speed and high speed scheduling for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * same transaction (AKA we have a split) we always do low speed first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * That means we can always pass "false" for only_one_period (that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * parameters is only useful when we're trying to get one schedule to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * match what we already planned in the other schedule).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) DWC2_LS_SCHEDULE_FRAMES, slices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) qh->device_interval, search_slice, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) if (slice < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return slice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) qh->ls_start_schedule_slice = slice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_ls_pmap_schedule()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) unsigned long *map = dwc2_get_ls_map(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /* Schedule should have failed, so no worries about no error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) qh->ls_start_schedule_slice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * dwc2_hs_pmap_schedule - Schedule in the main high speed schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * This will schedule something on the main dwc2 schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) * update this with the result upon success. We also use the duration from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * the same structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * @only_one_period: If true we will limit ourselves to just looking at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * one period (aka one 100us chunk). This is used if we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * already scheduled something on the low speed schedule and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * need to find something that matches on the high speed one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * @index: The index into qh->hs_transfers that we're working with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * Returns: 0 for success or an error code. Upon success the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * dwc2_hs_transfer_time specified by "index" will be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) bool only_one_period, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) int us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) us = pmap_schedule(hsotg->hs_periodic_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) DWC2_HS_PERIODIC_US_PER_UFRAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) qh->host_interval, trans_time->start_schedule_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) only_one_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) if (us < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) return us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) trans_time->start_schedule_us = us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * @index: Transfer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct dwc2_qh *qh, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) pmap_unschedule(hsotg->hs_periodic_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) DWC2_HS_PERIODIC_US_PER_UFRAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) qh->host_interval, trans_time->start_schedule_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * This is the most complicated thing in USB. We have to find matching time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * in both the global high speed schedule for the port and the low speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * schedule for the TT associated with the given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * Being here means that the host must be running in high speed mode and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * device is in low or full speed mode (and behind a hub).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) int bytecount = qh->maxp_mult * qh->maxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) int ls_search_slice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) int host_interval_in_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * The interval (how often to repeat) in the actual host schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * See pmap_schedule() for gcd() explanation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) host_interval_in_sched = gcd(qh->host_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) DWC2_HS_SCHEDULE_UFRAMES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * We always try to find space in the low speed schedule first, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * try to find high speed time that matches. If we don't, we'll bump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * up the place we start searching in the low speed schedule and try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * again. To start we'll look right at the beginning of the low speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * Note that this will tend to front-load the high speed schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * We may eventually want to try to avoid this by either considering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * both schedules together or doing some sort of round robin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * For isoc split out, start schedule at the 2 * DWC2_SLICES_PER_UFRAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * to transfer SSPLIT-begin OUT transaction like EHCI controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ls_search_slice = 2 * DWC2_SLICES_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ls_search_slice = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) int start_s_uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) int ssplit_s_uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) int second_s_uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) int rel_uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) int first_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) int middle_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) int end_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) int first_data_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) int other_data_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (qh->schedule_low_speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * If we got an error here there's no other magic we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * can do, so bail. All the looping above is only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * helpful to redo things if we got a low speed slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * and then couldn't find a matching high speed slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* Must be missing the tt structure? Why? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * This will give us a number 0 - 7 if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) start_s_uframe = qh->ls_start_schedule_slice /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) DWC2_SLICES_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /* Get a number that's always 0 - 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) rel_uframe = (start_s_uframe % 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * If we were going to start in uframe 7 then we would need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * issue a start split in uframe 6, which spec says is not OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * Move on to the next full frame (assuming there is one).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * See 11.18.4 Host Split Transaction Scheduling Requirements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * bullet 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (rel_uframe == 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (qh->schedule_low_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) dwc2_ls_pmap_unschedule(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) ls_search_slice =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) (qh->ls_start_schedule_slice /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) DWC2_LS_PERIODIC_SLICES_PER_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * For ISOC in:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * - start split (frame -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * - complete split w/ data (frame +1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * - complete split w/ data (frame +2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * - ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * - complete split w/ data (frame +num_data_packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * - complete split w/ data (frame +num_data_packets+1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * - complete split w/ data (frame +num_data_packets+2, max 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * ...though if frame was "0" then max is 7...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * For ISOC out we might need to do:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * - start split w/ data (frame -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * - start split w/ data (frame +0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * - ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * - start split w/ data (frame +num_data_packets-2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * For INTERRUPT in we might need to do:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * - start split (frame -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * - complete split w/ data (frame +1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * - complete split w/ data (frame +2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * - complete split w/ data (frame +3, max 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * For INTERRUPT out we might need to do:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * - start split w/ data (frame -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * - complete split (frame +1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * - complete split (frame +2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * - complete split (frame +3, max 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * Start adjusting!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ssplit_s_uframe = (start_s_uframe +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) host_interval_in_sched - 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) host_interval_in_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) second_s_uframe = start_s_uframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) second_s_uframe = start_s_uframe + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /* First data transfer might not be all 188 bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) first_data_bytes = 188 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) DWC2_SLICES_PER_UFRAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) DWC2_SLICES_PER_UFRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (first_data_bytes > bytecount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) first_data_bytes = bytecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) other_data_bytes = bytecount - first_data_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * For now, skip OUT xfers where first xfer is partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * Main dwc2 code assumes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * - INT transfers never get split in two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * - ISOC transfers can always transfer 188 bytes the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * Until that code is fixed, try again if the first transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * couldn't transfer everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * This code can be removed if/when the rest of dwc2 handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * the above cases. Until it's fixed we just won't be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * to schedule quite as tightly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (!qh->ep_is_in &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) (first_data_bytes != min_t(int, 188, bytecount))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dwc2_sch_dbg(hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) "QH=%p avoiding broken 1st xfer (%d, %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) qh, first_data_bytes, bytecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (qh->schedule_low_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) dwc2_ls_pmap_unschedule(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ls_search_slice = (start_s_uframe + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) DWC2_SLICES_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Start by assuming transfers for the bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * Everything except ISOC OUT has extra transfers. Rules are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * complicated. See 11.18.4 Host Split Transaction Scheduling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Requirements bullet 3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (rel_uframe == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) qh->num_hs_transfers += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) qh->num_hs_transfers += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (qh->ep_is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * First is start split, middle/end is data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Allocate full data bytes for all data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) first_count = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) middle_count = bytecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) end_count = bytecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * First is data, middle/end is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * First transfer and second can have data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Rest should just have complete split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) first_count = first_data_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) middle_count = max_t(int, 4, other_data_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) end_count = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (qh->ep_is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* Account for the start split */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) qh->num_hs_transfers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* Calculate "L" value from spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) last = rel_uframe + qh->num_hs_transfers + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* Start with basic case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (last <= 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) qh->num_hs_transfers += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) qh->num_hs_transfers += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* Adjust downwards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (last >= 6 && rel_uframe == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) qh->num_hs_transfers--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* 1st = start; rest can contain data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) first_count = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) middle_count = min_t(int, 188, bytecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) end_count = middle_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* All contain data, last might be smaller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) first_count = first_data_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) middle_count = min_t(int, 188,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) other_data_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) end_count = other_data_bytes % 188;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* Assign durations per uFrame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) for (i = 1; i < qh->num_hs_transfers - 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) qh->hs_transfers[i].duration_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) HS_USECS_ISO(middle_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (qh->num_hs_transfers > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) HS_USECS_ISO(end_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * Assign start us. The call below to dwc2_hs_pmap_schedule()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * will start with these numbers but may adjust within the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * microframe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) qh->hs_transfers[0].start_schedule_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) for (i = 1; i < qh->num_hs_transfers; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) qh->hs_transfers[i].start_schedule_us =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ((second_s_uframe + i - 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) DWC2_HS_SCHEDULE_UFRAMES) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) DWC2_HS_PERIODIC_US_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* Try to schedule with filled in hs_transfers above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) for (i = 0; i < qh->num_hs_transfers; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* If we scheduled all w/out breaking out then we're all good */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (i == qh->num_hs_transfers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) for (; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) dwc2_hs_pmap_unschedule(hsotg, qh, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (qh->schedule_low_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) dwc2_ls_pmap_unschedule(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* Try again starting in the next microframe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* In non-split host and device time are the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) WARN_ON(qh->host_us != qh->device_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) WARN_ON(qh->host_interval != qh->device_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) WARN_ON(qh->num_hs_transfers != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* We'll have one transfer; init start to 0 before calling scheduler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) qh->hs_transfers[0].start_schedule_us = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) qh->hs_transfers[0].duration_us = qh->host_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* In non-split host and device time are the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) WARN_ON(qh->host_us != qh->device_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) WARN_ON(qh->host_interval != qh->device_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) WARN_ON(!qh->schedule_low_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* Run on the main low speed schedule (no split = no hub = no TT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return dwc2_ls_pmap_schedule(hsotg, qh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * dwc2_uframe_schedule - Schedule a QH for a periodic xfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * Calls one of the 3 sub-function depending on what type of transfer this QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * is for. Also adds some printing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (qh->dev_speed == USB_SPEED_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ret = dwc2_uframe_schedule_hs(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) else if (!qh->do_split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ret = dwc2_uframe_schedule_ls(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) ret = dwc2_uframe_schedule_split(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) dwc2_qh_schedule_print(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * dwc2_uframe_unschedule - Undoes dwc2_uframe_schedule().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * @hsotg: The HCD state structure for the DWC OTG controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) for (i = 0; i < qh->num_hs_transfers; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dwc2_hs_pmap_unschedule(hsotg, qh, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (qh->schedule_low_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) dwc2_ls_pmap_unschedule(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * Takes a qh that has already been scheduled (which means we know we have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * bandwdith reserved for us) and set the next_active_frame and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * start_active_frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * This is expected to be called on qh's that weren't previously actively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * running. It just picks the next frame that we can fit into without any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * thought about the past.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * @qh: QH for a periodic endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) u16 frame_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) u16 earliest_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) u16 next_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) u16 relative_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) u16 interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * Use the real frame number rather than the cached value as of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * last SOF to give us a little extra slop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) frame_number = dwc2_hcd_get_frame_number(hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * We wouldn't want to start any earlier than the next frame just in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * case the frame number ticks as we're doing this calculation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * NOTE: if we could quantify how long till we actually get scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * we might be able to avoid the "+ 1" by looking at the upper part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * HFNUM (the FRREM field). For now we'll just use the + 1 though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) earliest_frame = dwc2_frame_num_inc(frame_number, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) next_active_frame = earliest_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Get the "no microframe schduler" out of the way... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (!hsotg->params.uframe_sched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (qh->do_split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* Splits are active at microframe 0 minus 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) next_active_frame |= 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * We're either at high speed or we're doing a split (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * means we're talking high speed to a hub). In any case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * the first frame should be based on when the first scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * event is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) WARN_ON(qh->num_hs_transfers < 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) relative_frame = qh->hs_transfers[0].start_schedule_us /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) DWC2_HS_PERIODIC_US_PER_UFRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* Adjust interval as per high speed schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * Low or full speed directly on dwc2. Just about the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * as high speed but on a different schedule and with slightly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * different adjustments. Note that this works because when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * the host and device are both low speed then frames in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * controller tick at low speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) relative_frame = qh->ls_start_schedule_slice /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) DWC2_LS_PERIODIC_SLICES_PER_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* Scheduler messed up if frame is past interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) WARN_ON(relative_frame >= interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * done the gcd(), so it's safe to move to the beginning of the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * interval like this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * After this we might be before earliest_frame, but don't worry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * we'll fix it...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) next_active_frame = (next_active_frame / interval) * interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * Actually choose to start at the frame number we've been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * scheduled for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) next_active_frame = dwc2_frame_num_inc(next_active_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) relative_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * We actually need 1 frame before since the next_active_frame is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * the frame number we'll be put on the ready list and we won't be on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * the bus until 1 frame later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * By now we might actually be before the earliest_frame. Let's move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * up intervals until we're not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) next_active_frame = dwc2_frame_num_inc(next_active_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) qh->next_active_frame = next_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) qh->start_active_frame = next_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) qh, frame_number, qh->next_active_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * dwc2_do_reserve() - Make a periodic reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * Try to allocate space in the periodic schedule. Depending on parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * this might use the microframe scheduler or the dumb scheduler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * Returns: 0 upon success; error upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (hsotg->params.uframe_sched) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) status = dwc2_uframe_schedule(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) status = dwc2_periodic_channel_available(hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) dev_info(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) "%s: No host channel available for periodic transfer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) status = dwc2_check_periodic_bandwidth(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) dev_dbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) "%s: Insufficient periodic bandwidth for periodic transfer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (!hsotg->params.uframe_sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* Reserve periodic channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) hsotg->periodic_channels++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* Update claimed usecs per (micro)frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) hsotg->periodic_usecs += qh->host_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) dwc2_pick_first_frame(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
/**
 * dwc2_do_unreserve() - Actually release the periodic reservation
 *
 * This function actually releases the periodic bandwidth that was reserved
 * by the given qh.
 *
 * Context: caller must hold hsotg->lock (enforced by the assert below).
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer.
 */
static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	assert_spin_locked(&hsotg->lock);

	/* We should only get here with an unreserve actually outstanding */
	WARN_ON(!qh->unreserve_pending);

	/* No more unreserve pending--we're doing it */
	qh->unreserve_pending = false;

	/*
	 * The QH is expected to be off all schedule lists by the time its
	 * reservation is released; warn and recover if it's still linked.
	 */
	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
		list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->host_us;

	if (hsotg->params.uframe_sched) {
		/* Give back the per-microframe slots taken at reserve time */
		dwc2_uframe_unschedule(hsotg, qh);
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
/**
 * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
 *
 * According to the kernel doc for usb_submit_urb() (specifically the part about
 * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
 * long as a device driver keeps submitting. Since we're using HCD_BH to give
 * back the URB we need to give the driver a little bit of time before we
 * release the reservation. This worker is called after the appropriate
 * delay.
 *
 * @t: Pointer to the unreserve_timer field of a struct dwc2_qh.
 */
static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
	struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	/*
	 * Wait for the lock, or for us to be scheduled again. We
	 * could be scheduled again if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 * - The timer has been kicked again.
	 * In that case cancel and wait for the next call.
	 *
	 * NOTE: timer_pending() going true while we spin means the timer was
	 * re-armed, so this invocation is stale and must bail out.
	 */
	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
		if (timer_pending(&qh->unreserve_timer))
			return;
	}

	/*
	 * Might be no more unreserve pending if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 *
	 * We can't put this in the loop above because unreserve_pending needs
	 * to be accessed under lock, so we can only check it once we got the
	 * lock.
	 */
	if (qh->unreserve_pending)
		dwc2_do_unreserve(hsotg, qh);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * host channel is large enough to handle the maximum data transfer in a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * (micro)frame for a periodic transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * @qh: QH for a periodic endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * Return: 0 if successful, negative error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) u32 max_xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) u32 max_channel_xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) max_xfer_size = qh->maxp * qh->maxp_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) max_channel_xfer_size = hsotg->params.max_transfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (max_xfer_size > max_channel_xfer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) dev_err(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) "%s: Periodic xfer length %d > max xfer length for channel %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) __func__, max_xfer_size, max_channel_xfer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) status = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * the periodic schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * @qh: QH for the periodic transfer. The QH should already contain the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * scheduling information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * Return: 0 if successful, negative error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) status = dwc2_check_max_xfer_size(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) dev_dbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) "%s: Channel max transfer size too small for periodic transfer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /* Cancel pending unreserve; if canceled OK, unreserve was pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (del_timer(&qh->unreserve_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) WARN_ON(!qh->unreserve_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * Only need to reserve if there's not an unreserve pending, since if an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * unreserve is pending then by definition our old reservation is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * valid. Unreserve might still be pending even if we didn't cancel if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * dwc2_unreserve_timer_fn() already started. Code in the timer handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!qh->unreserve_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) status = dwc2_do_reserve(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * It might have been a while, so make sure that frame_number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * is still good. Note: we could also try to use the similar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * dwc2_next_periodic_start() but that schedules much more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * tightly and we might need to hurry and queue things up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (dwc2_frame_num_le(qh->next_active_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) hsotg->frame_number))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dwc2_pick_first_frame(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) qh->unreserve_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (hsotg->params.dma_desc_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /* Don't rely on SOF and start in ready schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* Always start in inactive schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) list_add_tail(&qh->qh_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) &hsotg->periodic_sched_inactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * from the periodic schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * @qh: QH for the periodic transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) bool did_modify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) assert_spin_locked(&hsotg->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * Schedule the unreserve to happen in a little bit. Cases here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * - Unreserve worker might be sitting there waiting to grab the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * In this case it will notice it's been schedule again and will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * quit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * - Unreserve worker might not be scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * We should never already be scheduled since dwc2_schedule_periodic()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * should have canceled the scheduled unreserve timer (hence the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * warning on did_modify).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * We add + 1 to the timer to guarantee that at least 1 jiffy has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * passed (otherwise if the jiffy counter might tick right after we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * read it and we'll get no delay).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) did_modify = mod_timer(&qh->unreserve_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) jiffies + DWC2_UNRESERVE_DELAY + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) WARN_ON(did_modify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) qh->unreserve_pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) list_del_init(&qh->qh_list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
/**
 * dwc2_wait_timer_fn() - Timer function to re-queue after waiting
 *
 * As per the spec, a NAK indicates that "a function is temporarily unable to
 * transmit or receive data, but will eventually be able to do so without need
 * of host intervention".
 *
 * That means that when we encounter a NAK we're supposed to retry.
 *
 * ...but if we retry right away (from the interrupt handler that saw the NAK)
 * then we can end up with an interrupt storm (if the other side keeps NAKing
 * us) because on slow enough CPUs it could take us longer to get out of the
 * interrupt routine than it takes for the device to send another NAK. That
 * leads to a constant stream of NAK interrupts and the CPU locks.
 *
 * ...so instead of retrying right away in the case of a NAK we'll set a timer
 * to retry some time later. This function handles that timer and moves the
 * qh back to the "inactive" list, then queues transactions.
 *
 * @t: Pointer to wait_timer in a qh.
 *
 * Return: HRTIMER_NORESTART to not automatically restart this timer.
 */
static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
{
	struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	/*
	 * We'll set wait_timer_cancel to true if we want to cancel this
	 * operation in dwc2_hcd_qh_unlink().
	 */
	if (!qh->wait_timer_cancel) {
		enum dwc2_transaction_type tr_type;

		/* Done waiting for the NAK backoff; eligible to run again */
		qh->want_wait = false;

		/* Put the QH back where the scheduler will pick it up */
		list_move(&qh->qh_list_entry,
			  &hsotg->non_periodic_sched_inactive);

		/* Kick the scheduler so the retry actually goes out now */
		tr_type = dwc2_hcd_select_transactions(hsotg);
		if (tr_type != DWC2_TRANSACTION_NONE)
			dwc2_hcd_queue_transactions(hsotg, tr_type);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return HRTIMER_NORESTART;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 * @urb: Holds the information about the device/endpoint needed to initialize
 *       the QH
 * @mem_flags: Flags for allocating memory.
 */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb, gfp_t mem_flags)
{
	int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
	u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
	bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
	bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
	/* Read the current port speed from the host port register */
	u32 hprt = dwc2_readl(hsotg, HPRT0);
	u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
	/* Split transactions are needed for LS/FS devices behind a HS port */
	bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
			 dev_speed != USB_SPEED_HIGH);
	int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
	int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
	/* Worst-case bytes moved in one (micro)frame for this endpoint */
	int bytecount = maxp_mult * maxp;
	char *speed, *type;

	/* Initialize QH */
	qh->hsotg = hsotg;
	timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
	hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	qh->wait_timer.function = &dwc2_wait_timer_fn;
	qh->ep_type = ep_type;
	qh->ep_is_in = ep_is_in;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = maxp;
	qh->maxp_mult = maxp_mult;
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	qh->do_split = do_split;
	qh->dev_speed = dev_speed;

	if (ep_is_int || ep_is_isoc) {
		/* Compute scheduling parameters once and save them */
		int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
		struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
							       mem_flags,
							       &qh->ttport);
		int device_ns;

		qh->dwc_tt = dwc_tt;

		/*
		 * Host-side time is computed at the speed the host port runs
		 * at (high speed when splitting); device-side time uses the
		 * device's own speed.
		 */
		qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
							 ep_is_isoc, bytecount));
		device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
					      ep_is_isoc, bytecount);

		/* Account for the TT's turnaround (think) time on splits */
		if (do_split && dwc_tt)
			device_ns += dwc_tt->usb_tt->think_time;
		qh->device_us = NS_TO_US(device_ns);

		qh->device_interval = urb->interval;
		/* Host interval is in microframes when splitting (8 per frame) */
		qh->host_interval = urb->interval * (do_split ? 8 : 1);

		/*
		 * Schedule low speed if we're running the host in low or
		 * full speed OR if we've got a "TT" to deal with to access this
		 * device.
		 */
		qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
					 dwc_tt;

		if (do_split) {
			/* We won't know num transfers until we schedule */
			qh->num_hs_transfers = -1;
		} else if (dev_speed == USB_SPEED_HIGH) {
			qh->num_hs_transfers = 1;
		} else {
			qh->num_hs_transfers = 0;
		}

		/* We'll schedule later when we have something to do */
	}

	/* The rest of this function only produces debug logging */
	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
		     speed, bytecount);
	dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
		     dwc2_hcd_get_dev_addr(&urb->pipe_info),
		     dwc2_hcd_get_ep_num(&urb->pipe_info),
		     ep_is_in ? "IN" : "OUT");
	if (ep_is_int || ep_is_isoc) {
		dwc2_sch_dbg(hsotg,
			     "QH=%p ...duration: host=%d us, device=%d us\n",
			     qh, qh->host_us, qh->device_us);
		dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
			     qh, qh->host_interval, qh->device_interval);
		if (qh->schedule_low_speed)
			dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
				     qh, dwc2_get_ls_map(hsotg, qh));
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * dwc2_hcd_qh_create() - Allocates and initializes a QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * @urb: Holds the information about the device/endpoint needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * to initialize the QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * @mem_flags: Flags for allocating memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * Return: Pointer to the newly allocated QH, or NULL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) struct dwc2_hcd_urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct dwc2_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (!urb->priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /* Allocate memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) qh = kzalloc(sizeof(*qh), mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (!qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) dwc2_qh_init(hsotg, qh, urb, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (hsotg->params.dma_desc_enable &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) dwc2_hcd_qh_free(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh: The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupt disabled or spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/*
	 * Make sure any unreserve work is finished. If del_timer_sync()
	 * returns nonzero the timer was still armed, meaning the unreserve
	 * never ran -- do it ourselves (under the lock) before freeing.
	 */
	if (del_timer_sync(&qh->unreserve_timer)) {
		unsigned long flags;

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_do_unreserve(hsotg, qh);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	/*
	 * We don't have the lock so we can safely wait until the wait timer
	 * finishes. Of course, at this point in time we'd better have set
	 * wait_timer_cancel to true so if this timer was still pending it
	 * won't do anything anyway, but we want it to finish before we free
	 * memory.
	 */
	hrtimer_cancel(&qh->wait_timer);

	/* Drop our reference on the transaction translator info, if any */
	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);

	/* Release descriptor-DMA resources, or the bounce buffer if used */
	if (qh->desc_list)
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	else if (hsotg->unaligned_cache && qh->dw_align_buf)
		kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);

	kfree(qh);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * schedule if it is not already in the schedule. If the QH is already in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * the schedule, no action is taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * @qh: The QH to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) * Return: 0 if successful, negative error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) u32 intr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) ktime_t delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (dbg_qh(qh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) dev_vdbg(hsotg->dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (!list_empty(&qh->qh_list_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) /* QH already in a schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) /* Add the new QH to the appropriate schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (dwc2_qh_is_non_per(qh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) /* Schedule right away */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) qh->start_active_frame = hsotg->frame_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) qh->next_active_frame = qh->start_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (qh->want_wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) list_add_tail(&qh->qh_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) &hsotg->non_periodic_sched_waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) qh->wait_timer_cancel = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) list_add_tail(&qh->qh_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) &hsotg->non_periodic_sched_inactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) status = dwc2_schedule_periodic(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (!hsotg->periodic_qh_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) intr_mask = dwc2_readl(hsotg, GINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) intr_mask |= GINTSTS_SOF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) dwc2_writel(hsotg, intr_mask, GINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) hsotg->periodic_qh_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * schedule. Memory is not freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * @hsotg: The HCD state structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * @qh: QH to remove from schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) u32 intr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) dev_vdbg(hsotg->dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /* If the wait_timer is pending, this will stop it from acting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) qh->wait_timer_cancel = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (list_empty(&qh->qh_list_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /* QH is not in a schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (dwc2_qh_is_non_per(qh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) hsotg->non_periodic_qh_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) hsotg->non_periodic_qh_ptr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) list_del_init(&qh->qh_list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) dwc2_deschedule_periodic(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) hsotg->periodic_qh_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (!hsotg->periodic_qh_count &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) !hsotg->params.dma_desc_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) intr_mask = dwc2_readl(hsotg, GINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) intr_mask &= ~GINTSTS_SOF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) dwc2_writel(hsotg, intr_mask, GINTMSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * This is called for setting next_active_frame for periodic splits for all but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * the first packet of the split. Confusing? I thought so...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * Periodic splits are single low/full speed transfers that we end up splitting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * up into several high speed transfers. They always fit into one full (1 ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * frame but might be split over several microframes (125 us each). We to put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * each of the parts on a very specific high speed frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * This function figures out where the next active uFrame needs to be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) * @hsotg: The HCD state structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * @frame_number: The current frame number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * Return: number missed by (or 0 if we didn't miss).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) struct dwc2_qh *qh, u16 frame_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) u16 old_frame = qh->next_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) int missed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) u16 incr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * See dwc2_uframe_schedule_split() for split scheduling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * Basically: increment 1 normally, but 2 right after the start split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * (except for ISOC out).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (old_frame == qh->start_active_frame &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) incr = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) incr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * Note that it's OK for frame_number to be 1 frame past
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * next_active_frame. Remember that next_active_frame is supposed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * be 1 frame _before_ when we want to be scheduled. If we're 1 frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * past it just means schedule ASAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * It's _not_ OK, however, if we're more than one frame past.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * OOPS, we missed. That's actually pretty bad since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * the hub will be unhappy; try ASAP I guess.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) missed = dwc2_frame_num_dec(prev_frame_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) qh->next_active_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) qh->next_active_frame = frame_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return missed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * dwc2_next_periodic_start() - Set next_active_frame for next transfer start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * This is called for setting next_active_frame for a periodic transfer for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * all cases other than midway through a periodic split. This will also update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * start_active_frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * Since we _always_ keep start_active_frame as the start of the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * transfer this is normally pretty easy: we just add our interval to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * start_active_frame and we've got our answer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * The tricks come into play if we miss. In that case we'll look for the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * slot we can fit into.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * @hsotg: The HCD state structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * @qh: QH for the periodic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * @frame_number: The current frame number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * Return: number missed by (or 0 if we didn't miss).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) struct dwc2_qh *qh, u16 frame_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) int missed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) u16 interval = qh->host_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * The dwc2_frame_num_gt() function used below won't work terribly well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * with if we just incremented by a really large intervals since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * frame counter only goes to 0x3fff. It's terribly unlikely that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * will have missed in this case anyway. Just go to exit. If we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) * to try to do better we'll need to keep track of a bigger counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * somewhere in the driver and handle overflows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (interval >= 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * Test for misses, which is when it's too late to schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * A few things to note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * - We compare against prev_frame_number since start_active_frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * and next_active_frame are always 1 frame before we want things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * to be active and we assume we can still get scheduled in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * current frame number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * - It's possible for start_active_frame (now incremented) to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * next_active_frame if we got an EO MISS (even_odd miss) which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * basically means that we detected there wasn't enough time for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * the last packet and dwc2_hc_set_even_odd_frame() rescheduled us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * at the last second. We want to make sure we don't schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * another transfer for the same frame. My test webcam doesn't seem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * terribly upset by missing a transfer but really doesn't like when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * we do two transfers in the same frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * - Some misses are expected. Specifically, in order to work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * perfectly dwc2 really needs quite spectacular interrupt latency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * requirements. It needs to be able to handle its interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * completely within 125 us of them being asserted. That not only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * means that the dwc2 interrupt handler needs to be fast but it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * means that nothing else in the system has to block dwc2 for a long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * time. We can help with the dwc2 parts of this, but it's hard to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * guarantee that a system will have interrupt latency < 125 us, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * we have to be robust to some misses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (qh->start_active_frame == qh->next_active_frame ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) u16 ideal_start = qh->start_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) int periods_in_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * Adjust interval as per gcd with map size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * See pmap_schedule() for more details here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) interval = gcd(interval, periods_in_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) qh->start_active_frame = dwc2_frame_num_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) qh->start_active_frame, interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) } while (dwc2_frame_num_gt(prev_frame_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) qh->start_active_frame));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) missed = dwc2_frame_num_dec(qh->start_active_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) ideal_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) qh->next_active_frame = qh->start_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return missed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) * Deactivates a QH. For non-periodic QHs, removes the QH from the active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * non-periodic schedule. The QH is added to the inactive non-periodic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * schedule if any QTDs are still attached to the QH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * For periodic QHs, the QH is removed from the periodic queued schedule. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * there are any QTDs still attached to the QH, the QH is added to either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * periodic inactive schedule or the periodic ready schedule and its next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * scheduled frame is calculated. The QH is placed in the ready schedule if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * the scheduled frame has been reached already. Otherwise it's placed in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * inactive schedule. If there are no QTDs attached to the QH, the QH is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * completely removed from the periodic schedule.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) int sched_next_periodic_split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) u16 old_frame = qh->next_active_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) u16 frame_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) int missed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (dbg_qh(qh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) dev_vdbg(hsotg->dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (dwc2_qh_is_non_per(qh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) dwc2_hcd_qh_unlink(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (!list_empty(&qh->qtd_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) /* Add back to inactive/waiting non-periodic schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) dwc2_hcd_qh_add(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) * Use the real frame number rather than the cached value as of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * last SOF just to get us a little closer to reality. Note that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * means we don't actually know if we've already handled the SOF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * interrupt for this frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) frame_number = dwc2_hcd_get_frame_number(hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (sched_next_periodic_split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) dwc2_sch_vdbg(hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) qh, sched_next_periodic_split, frame_number, old_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) qh->next_active_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) dwc2_frame_num_dec(qh->next_active_frame, old_frame),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) missed, missed ? "MISS" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (list_empty(&qh->qtd_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) dwc2_hcd_qh_unlink(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * Remove from periodic_sched_queued and move to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * appropriate queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * Note: we purposely use the frame_number from the "hsotg" structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * since we know SOF interrupt will handle future frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) list_move_tail(&qh->qh_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) &hsotg->periodic_sched_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) list_move_tail(&qh->qh_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) &hsotg->periodic_sched_inactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) * dwc2_hcd_qtd_init() - Initializes a QTD structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * @qtd: The QTD to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * @urb: The associated URB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) qtd->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * The only time the QTD data toggle is used is on the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * phase of control transfers. This phase always starts with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * DATA1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) qtd->data_toggle = DWC2_HC_PID_DATA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) qtd->control_phase = DWC2_CONTROL_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* Start split */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) qtd->complete_split = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) qtd->isoc_split_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) qtd->in_process = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /* Store the qtd ptr in the urb to reference the QTD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) urb->qtd = qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * Caller must hold driver lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * @hsotg: The DWC HCD structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) * @qtd: The QTD to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * @qh: Queue head to add qtd to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * Return: 0 if successful, negative error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * If the QH to which the QTD is added is not currently scheduled, it is placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * into the proper schedule based on its EP type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (unlikely(!qh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) retval = dwc2_hcd_qh_add(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) qtd->qh = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) }