// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc_node(max_packet, flags,
					       dev_to_node(dev));
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}
EXPORT_SYMBOL_GPL(xhci_segment_free);

void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				 struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
void xhci_link_segments(struct xhci_segment *prev,
			struct xhci_segment *next,
			enum xhci_ring_type type, bool chain_links)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		if (chain_links)
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
EXPORT_SYMBOL_GPL(xhci_link_segments);

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;
	bool chain_links;

	if (!ring || !first || !last)
		return;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (ring->type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	next = ring->enq_seg->next;
	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
	xhci_link_segments(last, next, ring->type, chain_links);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/*
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to. We need to do this because the host controller won't tell
 * us which stream ring the TRB came from. We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses. For example, say I
 * have segments of size 1KB, that are always 1KB aligned. A segment may
 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 22 bits of
 * the address, the key to the stream ID is 0x43244. I can use the DMA address
 * of the TRB to pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key. On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
 */
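/*
 * In this driver the key is simply the segment DMA address shifted right by
 * TRB_SEGMENT_SHIFT. A rough sketch of the arithmetic, assuming the usual
 * 256 16-byte TRBs per segment (4096-byte segments, so a shift of 12):
 *
 *	seg->dma = 0x10c91000
 *	key      = 0x10c91000 >> 12 = 0x10c91
 *
 * Every TRB inside that 4KB segment then maps to the same key, and the radix
 * tree resolves the key to the owning stream ring.
 */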
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *seg,
		gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}
EXPORT_SYMBOL_GPL(xhci_ring_free);

void xhci_initialize_ring_info(struct xhci_ring *ring,
			       unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;

	/*
	 * Each segment has a link TRB, and we leave one extra TRB for
	 * software accounting purposes.
	 */
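	/*
	 * A worked example of the accounting, assuming the usual
	 * TRBS_PER_SEGMENT of 256: a freshly initialized two-segment ring
	 * has 2 * (256 - 1) - 1 = 509 free TRBs, since one TRB per segment
	 * is the link TRB and one more is withheld for bookkeeping.
	 */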
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;
	bool chain_links;

	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
	chain_links = !!(xhci_link_trb_quirk(xhci) ||
			 (type == TYPE_ISOC &&
			  (xhci->quirks & XHCI_AMD_0x96_HOST)));

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(prev, next, type, chain_links);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, *first, type, chain_links);
	*last = prev;

	return 0;
}

static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

	if (ops && ops->free_container_ctx)
		ops->free_container_ctx(xhci, ctx);
}

static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
					    int type, gfp_t flags)
{
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

	if (ops && ops->alloc_container_ctx)
		ops->alloc_container_ctx(xhci, ctx, type, flags);
}

static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
		u32 endpoint_type, enum xhci_ring_type ring_type,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

	if (ops && ops->alloc_transfer_ring)
		return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
				max_packet, mem_flags);
	return NULL;
}

void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, unsigned int ep_index)
{
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

	if (ops && ops->free_transfer_ring)
		ops->free_transfer_ring(xhci, virt_dev, ep_index);
}

bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, unsigned int ep_index)
{
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

	if (ops && ops->is_usb_offload_enabled)
		return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
	return false;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}
EXPORT_SYMBOL_GPL(xhci_ring_alloc);
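
/*
 * A sketch of typical use, for reference: this mirrors how xhci_mem_init()
 * creates the command ring, as a single-segment ring with cycle state 1 and
 * no bounce buffers:
 *
 *	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
 */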

void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
		xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index);
	else
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);

	virt_dev->eps[ep_index].ring = NULL;
}

/*
 * Expand an existing ring.
 * Allocate new segments and link them into the existing ring: enough for the
 * requested number of TRBs, or enough to double the ring, whichever is more.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

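	/*
	 * Each segment contributes TRBS_PER_SEGMENT - 1 usable TRBs (the
	 * last entry is the link TRB), so this is a ceiling division. For
	 * example, with 256 TRBs per segment, requesting 600 TRBs yields
	 * (600 + 254) / 255 = 3 segments.
	 */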
	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
			(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or enough to double the ring */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
				ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
	if (!ctx)
		return NULL;

	ctx->type = type;
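	/*
	 * Per the xHCI spec, a Device Context holds 32 entries (the slot
	 * context plus 31 endpoint contexts) of CTX_SIZE bytes each:
	 * 32 * 64 = 2048 bytes on hosts with 64-byte contexts, and
	 * 32 * 32 = 1024 bytes otherwise. An Input Context carries one extra
	 * entry, the Input Control Context, hence the added CTX_SIZE below.
	 */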
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
	    (ops && ops->alloc_container_ctx))
		xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
	else
		ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);

	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

	if (!ctx)
		return;
	if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
	    (ops && ops->free_container_ctx))
		xhci_vendor_free_container_ctx(xhci, ctx);
	else
		dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);

	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
		struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
EXPORT_SYMBOL_GPL(xhci_get_slot_ctx);

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/*
	 * Increment ep_index past the entries that precede the endpoint
	 * context array: the slot context, plus the input control context
	 * for input contexts.
	 */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);


/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size, stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size, dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}
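
/*
 * A sketch of typical use from an event handler (hypothetical caller; the
 * transfer event's buffer field holds the DMA address of the TRB that
 * completed):
 *
 *	ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 *
 * For a streams endpoint this walks the radix tree described above; for an
 * ordinary endpoint it simply returns ep->ring.
 */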

/*
 * Change an endpoint's internal structure so it supports stream IDs. The
 * number of requested streams includes stream 0, which cannot be used by device
 * drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use. This is because the number of
 * stream context array entries must be a power of two.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
		 num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
			dev_to_node(dev));
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kcalloc_node(
			num_streams, sizeof(struct xhci_ring *), mem_flags,
			dev_to_node(dev));
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero. This will cause the xHC to give us an
	 * error if the device asks for a stream ID we haven't set up (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci, num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * Sets the MaxPStreams field and the Linear Stream Array field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * Sets the dequeue pointer to the stream context array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct xhci_ep_ctx *ep_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct xhci_stream_info *stream_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) u32 max_primary_streams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /* MaxPStreams is the number of stream context array entries, not the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) */
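/* For example, with num_stream_ctxs = 256: fls(256) = 9, so
 * MaxPStreams = 9 - 2 = 7 and the xHC decodes 2^(7 + 1) = 256 stream
 * context array entries.
 */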
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) "Setting number of stream ctx array entries to %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 1 << (max_primary_streams + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) | EP_HAS_LSA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * Sets the MaxPStreams field and the Linear Stream Array field to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * not at the beginning of the ring).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct xhci_virt_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
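/* TRBs are 16-byte aligned, so the low bits of a TRB address are zero;
 * bit 0 of the dequeue pointer field is reused to carry the ring's
 * Dequeue Cycle State, OR'd in below.
 */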
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /* Frees all stream contexts associated with the endpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * Caller should fix the endpoint context streams fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) void xhci_free_stream_info(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct xhci_stream_info *stream_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int cur_stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct xhci_ring *cur_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!stream_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) for (cur_stream = 1; cur_stream < stream_info->num_streams;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cur_stream++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cur_ring = stream_info->stream_rings[cur_stream];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (cur_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) xhci_ring_free(xhci, cur_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) stream_info->stream_rings[cur_stream] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) xhci_free_command(xhci, stream_info->free_streams_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) xhci->cmd_ring_reserved_trbs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (stream_info->stream_ctx_array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) xhci_free_stream_ctx(xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) stream_info->num_stream_ctxs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) stream_info->stream_ctx_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) stream_info->ctx_array_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) kfree(stream_info->stream_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) kfree(stream_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /***************** Device context manipulation *************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct xhci_virt_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ep->xhci = xhci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static void xhci_free_tt_info(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct xhci_virt_device *virt_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) int slot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct list_head *tt_list_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct xhci_tt_bw_info *tt_info, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) bool slot_found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /* If the device never made it past the Set Address stage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * it may not have the real_port set correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (virt_dev->real_port == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) xhci_dbg(xhci, "Bad real port.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* Multi-TT hubs will have more than one entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (tt_info->slot_id == slot_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) slot_found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) list_del(&tt_info->tt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) kfree(tt_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) } else if (slot_found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
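/*
 * Allocate TT bandwidth bookkeeping for a hub below a root port: a
 * single-TT hub gets one xhci_tt_bw_info entry, a multi-TT hub gets one
 * entry per downstream port.
 */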
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int xhci_alloc_tt_info(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct xhci_virt_device *virt_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct usb_device *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct usb_tt *tt, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct xhci_tt_bw_info *tt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned int num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (!tt->multi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) num_ports = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) num_ports = hdev->maxchild;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) for (i = 0; i < num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct xhci_interval_bw_table *bw_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (!tt_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) goto free_tts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) INIT_LIST_HEAD(&tt_info->tt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) list_add(&tt_info->tt_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) &xhci->rh_bw[virt_dev->real_port - 1].tts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) tt_info->slot_id = virt_dev->udev->slot_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (tt->multi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) tt_info->ttport = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) bw_table = &tt_info->bw_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (j = 0; j < XHCI_MAX_INTERVAL; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) free_tts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* All the xhci_tds in the ring's TD list should be freed at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * Should be called with xhci->lock held if there is any chance the TT lists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * will be manipulated by the configure endpoint, allocate device, or update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * hub functions while this function is removing the TT entries from the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct xhci_virt_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int old_active_eps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Slot ID 0 is reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (slot_id == 0 || !xhci->devs[slot_id])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dev = xhci->devs[slot_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) trace_xhci_free_virt_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (dev->tt_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) old_active_eps = dev->tt_info->active_eps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) for (i = 0; i < 31; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (dev->eps[i].ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) xhci_free_endpoint_ring(xhci, dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (dev->eps[i].stream_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) xhci_free_stream_info(xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) dev->eps[i].stream_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* Endpoints on the TT/root port lists should have been removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * when usb_disable_device() was called for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * We can't drop them anyway, because the udev might have gone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * away by this point, and we can't tell what speed it was.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (!list_empty(&dev->eps[i].bw_endpoint_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) xhci_warn(xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) "Slot %u endpoint %u not removed from BW list!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) slot_id, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /* If this is a hub, free the TT(s) from the TT list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) xhci_free_tt_info(xhci, dev, slot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* If necessary, update the number of active TTs on this root port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) xhci_update_tt_active_eps(xhci, dev, old_active_eps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (dev->in_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) xhci_free_container_ctx(xhci, dev->in_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (dev->out_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) xhci_free_container_ctx(xhci, dev->out_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (dev->udev && dev->udev->slot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dev->udev->slot_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) kfree(xhci->devs[slot_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) xhci->devs[slot_id] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * Free a virt_device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * If the virt_device added a tt_info (a hub) and has children pointing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * that tt_info, then free the child first. Recursive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * We can't rely on udev at this point to find child-parent relationships.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
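/* Note: xhci_free_virt_device() clears xhci->devs[slot_id], so the walk
 * below visits each child slot at most once and the recursion bottoms
 * out at devices whose tt_info has no remaining users.
 */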
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct xhci_virt_device *vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct list_head *tt_list_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct xhci_tt_bw_info *tt_info, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) vdev = xhci->devs[slot_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (!vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (vdev->real_port == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) xhci_dbg(xhci, "Bad vdev->real_port.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Is this a hub device that added a tt_info to the tts list? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (tt_info->slot_id == slot_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* are any devices using this tt_info? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) vdev = xhci->devs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (vdev && (vdev->tt_info == tt_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) xhci_free_virt_devices_depth_first(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) xhci, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* we are now at a leaf device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) xhci_debugfs_remove_slot(xhci, slot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) xhci_free_virt_device(xhci, slot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct usb_device *udev, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct xhci_virt_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* Slot ID 0 is reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (slot_id == 0 || xhci->devs[slot_id]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) dev = kzalloc(sizeof(*dev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dev->slot_id = slot_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Allocate the (output) device context that will be used in the HC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (!dev->out_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) (unsigned long long)dev->out_ctx->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* Allocate the (input) device context for address device command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (!dev->in_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) (unsigned long long)dev->in_ctx->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /* Initialize the cancellation list and watchdog timers for each ep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) for (i = 0; i < 31; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) dev->eps[i].ep_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) dev->eps[i].vdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) xhci_init_endpoint_timer(xhci, &dev->eps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* Allocate endpoint 0 ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!dev->eps[0].ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dev->udev = udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* Point to output device context in dcbaa. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) slot_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) &xhci->dcbaa->dev_context_ptrs[slot_id],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) trace_xhci_alloc_virt_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) xhci->devs[slot_id] = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (dev->in_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) xhci_free_container_ctx(xhci, dev->in_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (dev->out_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) xhci_free_container_ctx(xhci, dev->out_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct usb_device *udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct xhci_virt_device *virt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct xhci_ep_ctx *ep0_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct xhci_ring *ep_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) virt_dev = xhci->devs[udev->slot_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ep_ring = virt_dev->eps[0].ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * FIXME we don't keep track of the dequeue pointer very well after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * Set TR dequeue pointer, so we're setting the dequeue pointer of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * host to our enqueue pointer. This should only be called after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * configured device has reset, so all control transfers should have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * been completed or cancelled before the reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ep_ring->enqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) | ep_ring->cycle_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * The xHCI roothub may have ports of differing speeds in any order in the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * status registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * The xHCI hardware wants to know the roothub port number that the USB device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * is attached to (or the roothub port its ancestor hub is attached to). All we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * know is the index of that port under either the USB 2.0 or the USB 3.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * roothub, but that doesn't give us the real index into the HW port status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * registers. Call xhci_find_raw_port_number() to get the real index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct usb_device *udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct usb_device *top_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct usb_hcd *hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (udev->speed >= USB_SPEED_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) hcd = xhci->shared_hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) hcd = xhci->main_hcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) top_dev = top_dev->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /* Found device below root hub */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return xhci_find_raw_port_number(hcd, top_dev->portnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* Setup an xHCI virtual device for a Set Address command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct xhci_virt_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct xhci_ep_ctx *ep0_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct xhci_slot_ctx *slot_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) u32 port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) u32 max_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct usb_device *top_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dev = xhci->devs[udev->slot_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* Slot ID 0 is reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (udev->slot_id == 0 || !dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) udev->slot_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* 3) Only the control endpoint is valid - one endpoint context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) switch (udev->speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) case USB_SPEED_SUPER_PLUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) max_packets = MAX_PACKET(512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) case USB_SPEED_SUPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) max_packets = MAX_PACKET(512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) case USB_SPEED_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) max_packets = MAX_PACKET(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* USB core guesses at a 64-byte max packet first for FS devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) case USB_SPEED_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) max_packets = MAX_PACKET(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) case USB_SPEED_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) max_packets = MAX_PACKET(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) case USB_SPEED_WIRELESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* Speed was set earlier, this shouldn't happen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /* Find the root hub port this device is under */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) port_num = xhci_find_real_port_number(xhci, udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (!port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* Set the port number in the virtual_device to the faked port number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) top_dev = top_dev->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* Found device below root hub */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) dev->fake_port = top_dev->portnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) dev->real_port = port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* Find the right bandwidth table that this device will be a part of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * If this is a full speed device attached directly to a root port (or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * descendant of one), it counts as a primary bandwidth domain, not a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * secondary bandwidth domain under a TT. An xhci_tt_info structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * will never be created for the HS root hub.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!udev->tt || !udev->tt->hub->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct xhci_root_port_bw_info *rh_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct xhci_tt_bw_info *tt_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) rh_bw = &xhci->rh_bw[port_num - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /* Find the right TT. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (tt_bw->slot_id != udev->tt->hub->slot_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!dev->udev->tt->multi ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) (udev->tt->multi &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) tt_bw->ttport == dev->udev->ttport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) dev->bw_table = &tt_bw->bw_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) dev->tt_info = tt_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (!dev->tt_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* Is this a LS/FS device under an external HS hub? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (udev->tt && udev->tt->hub->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) (udev->ttport << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (udev->tt->multi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /* Step 4 - ring already allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /* Step 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) max_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dev->eps[0].ring->cycle_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) trace_xhci_setup_addressable_virt_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * Convert interval expressed as 2^(bInterval - 1) == interval into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * straight exponent value 2^n == interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) */
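/*
 * For example, a high speed interrupt endpoint with bInterval = 4 yields
 * interval = 3, i.e. a service period of 2^3 * 125us = 1 ms; a full speed
 * isoc endpoint with bInterval = 1 yields 0 + 3 = 3 after the frames to
 * microframes adjustment below, again a 1 ms period.
 */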
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) unsigned int interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (interval != ep->desc.bInterval - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) dev_warn(&udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) "ep %#x - rounding interval to %d %sframes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) ep->desc.bEndpointAddress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 1 << interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) udev->speed == USB_SPEED_FULL ? "" : "micro");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (udev->speed == USB_SPEED_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * Full speed isoc endpoints specify interval in frames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) * not microframes. We are using microframes everywhere,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * so adjust accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) interval += 3; /* 1 frame = 2^3 uframes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * microframes, rounded down to nearest power of 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) */
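/* For example, desc_interval = 9 gives fls(9) - 1 = 3, so the interval
 * is rounded down to 2^3 = 8 microframes.
 */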
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct usb_host_endpoint *ep, unsigned int desc_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) unsigned int min_exponent, unsigned int max_exponent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) unsigned int interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) interval = fls(desc_interval) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) interval = clamp_val(interval, min_exponent, max_exponent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if ((1 << interval) != desc_interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) dev_dbg(&udev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ep->desc.bEndpointAddress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 1 << interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) desc_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (ep->desc.bInterval == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return xhci_microframes_to_exponent(udev, ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ep->desc.bInterval, 0, 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
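/*
 * LS/FS interrupt endpoints express bInterval in frames; multiply by 8
 * to convert to microframes and clamp the exponent to 3..10, i.e. a
 * service period between 1 ms and 128 ms.
 */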
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return xhci_microframes_to_exponent(udev, ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) ep->desc.bInterval * 8, 3, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* Return the polling or NAK interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * The polling interval is expressed in "microframes". If xHCI's Interval field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * is set to N, it will service the endpoint every 2^(Interval)*125us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * is set to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) unsigned int interval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) switch (udev->speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) case USB_SPEED_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /* Max NAK rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (usb_endpoint_xfer_control(&ep->desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) usb_endpoint_xfer_bulk(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) interval = xhci_parse_microframe_interval(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) fallthrough; /* SS and HS isoc/int have same decoding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) case USB_SPEED_SUPER_PLUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) case USB_SPEED_SUPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (usb_endpoint_xfer_int(&ep->desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) usb_endpoint_xfer_isoc(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) interval = xhci_parse_exponent_interval(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) case USB_SPEED_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (usb_endpoint_xfer_isoc(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) interval = xhci_parse_exponent_interval(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * Fall through for interrupt endpoint interval decoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * since it uses the same rules as low speed interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * endpoints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) case USB_SPEED_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (usb_endpoint_xfer_int(&ep->desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) usb_endpoint_xfer_isoc(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) interval = xhci_parse_frame_interval(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * High speed endpoint descriptors can define "the number of additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * transaction opportunities per microframe", but that goes in the Max Burst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * endpoint context field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) */
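/* Per the USB 3.x SuperSpeed endpoint companion descriptor,
 * bmAttributes[1:0] holds Mult for isoc endpoints, bounding the packets
 * per service interval at (bMaxBurst + 1) * (Mult + 1).
 */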
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static u32 xhci_get_endpoint_mult(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (udev->speed < USB_SPEED_SUPER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) !usb_endpoint_xfer_isoc(&ep->desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return ep->ss_ep_comp.bmAttributes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /* Super speed and Plus have max burst in ep companion desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (udev->speed >= USB_SPEED_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return ep->ss_ep_comp.bMaxBurst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (udev->speed == USB_SPEED_HIGH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) (usb_endpoint_xfer_isoc(&ep->desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) usb_endpoint_xfer_int(&ep->desc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return usb_endpoint_maxp_mult(&ep->desc) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) in = usb_endpoint_dir_in(&ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) switch (usb_endpoint_type(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) case USB_ENDPOINT_XFER_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return CTRL_EP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) case USB_ENDPOINT_XFER_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return in ? BULK_IN_EP : BULK_OUT_EP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) case USB_ENDPOINT_XFER_ISOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return in ? ISOC_IN_EP : ISOC_OUT_EP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) case USB_ENDPOINT_XFER_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return in ? INT_IN_EP : INT_OUT_EP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* Return the maximum endpoint service interval time (ESIT) payload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * Basically, this is the maxpacket size, multiplied by the burst size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * and mult size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) */
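/* For example, a high speed isoc endpoint advertising a 1024-byte max
 * packet with two additional transaction opportunities per microframe
 * has a max ESIT payload of 1024 * 3 = 3072 bytes.
 */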
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) static u32 xhci_get_max_esit_payload(struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) int max_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) int max_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /* Only applies for interrupt or isochronous endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (usb_endpoint_xfer_control(&ep->desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) usb_endpoint_xfer_bulk(&ep->desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /* SuperSpeedPlus Isoc ep sending over 48k per esit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) else if (udev->speed >= USB_SPEED_SUPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) max_packet = usb_endpoint_maxp(&ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) max_burst = usb_endpoint_maxp_mult(&ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* A 0 in max burst means 1 transfer per ESIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return max_packet * max_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* Set up an endpoint with one ring segment. Do not allocate stream rings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * Drivers will have to call usb_alloc_streams() to do that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int xhci_endpoint_init(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct xhci_virt_device *virt_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct usb_device *udev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct usb_host_endpoint *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) unsigned int ep_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct xhci_ep_ctx *ep_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct xhci_ring *ep_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) unsigned int max_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) enum xhci_ring_type ring_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) u32 max_esit_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) u32 endpoint_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) unsigned int max_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) unsigned int interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) unsigned int mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) unsigned int avg_trb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) unsigned int err_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ep_index = xhci_get_endpoint_index(&ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) endpoint_type = xhci_get_endpoint_type(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (!endpoint_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) ring_type = usb_endpoint_type(&ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * Get values to fill the endpoint context, mostly from ep descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * The average TRB buffer length for bulk endpoints is unclear as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * have no clue on scatter gather list entry size. For Isoc and Int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) max_esit_payload = xhci_get_max_esit_payload(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) interval = xhci_get_endpoint_interval(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) /* Periodic endpoint bInterval limit quirk */
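/* With XHCI_LIMIT_ENDPOINT_INTERVAL_7, a period of 2^7 * 125us = 16 ms
 * or longer is capped to 2^6 * 125us = 8 ms.
 */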
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (usb_endpoint_xfer_int(&ep->desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) usb_endpoint_xfer_isoc(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) udev->speed >= USB_SPEED_HIGH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) interval >= 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) interval = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) mult = xhci_get_endpoint_mult(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) max_packet = usb_endpoint_maxp(&ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) max_burst = xhci_get_endpoint_max_burst(udev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) avg_trb_len = max_esit_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /* FIXME dig Mult and streams info out of ep companion desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /* Allow 3 retries for everything but isoc, set CErr = 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (!usb_endpoint_xfer_isoc(&ep->desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) err_count = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (usb_endpoint_xfer_bulk(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (udev->speed == USB_SPEED_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) max_packet = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (udev->speed == USB_SPEED_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) max_packet = rounddown_pow_of_two(max_packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) max_packet = clamp_val(max_packet, 8, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) avg_trb_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /* xHCI 1.1 hosts with LEC support don't use the mult field; it is RsvdZ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) mult = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /* Set up the endpoint ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) usb_endpoint_xfer_isoc(&ep->desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) virt_dev->eps[ep_index].new_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) max_packet, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) } else {
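/* Default: a two-segment transfer ring with initial cycle state 1 */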
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) virt_dev->eps[ep_index].new_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (!virt_dev->eps[ep_index].new_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) virt_dev->eps[ep_index].skip = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) ep_ring = virt_dev->eps[ep_index].new_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /* Fill the endpoint context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) EP_INTERVAL(interval) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) EP_MULT(mult));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) MAX_PACKET(max_packet) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) MAX_BURST(max_burst) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) ERROR_COUNT(err_count));
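/* Bit 0 of the TR Dequeue Pointer is the Dequeue Cycle State (DCS) */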
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) ep_ring->cycle_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) EP_AVG_TRB_LENGTH(avg_trb_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) void xhci_endpoint_zero(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) struct xhci_virt_device *virt_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct usb_host_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) unsigned int ep_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct xhci_ep_ctx *ep_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) ep_index = xhci_get_endpoint_index(&ep->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) ep_ctx->ep_info = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) ep_ctx->ep_info2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ep_ctx->deq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ep_ctx->tx_info = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* Don't free the endpoint ring until the set interface or configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * request succeeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) bw_info->ep_interval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) bw_info->mult = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) bw_info->num_packets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) bw_info->max_packet_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) bw_info->type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) bw_info->max_esit_payload = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) void xhci_update_bw_info(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) struct xhci_container_ctx *in_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct xhci_input_control_ctx *ctrl_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct xhci_virt_device *virt_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) struct xhci_bw_info *bw_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) struct xhci_ep_ctx *ep_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) unsigned int ep_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
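/* Skip eps[0]: the default control endpoint is never periodic or dropped */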
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) for (i = 1; i < 31; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) bw_info = &virt_dev->eps[i].bw_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /* We can't tell what endpoint type is being dropped, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * unconditionally clearing the bandwidth info for non-periodic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * endpoints should be harmless because the info will never be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * set in the first place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /* Dropped endpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) xhci_clear_endpoint_bw_info(bw_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (EP_IS_ADDED(ctrl_ctx, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* Ignore non-periodic endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) ep_type != ISOC_IN_EP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) ep_type != INT_IN_EP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /* Added or changed endpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) bw_info->ep_interval = CTX_TO_EP_INTERVAL(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) le32_to_cpu(ep_ctx->ep_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /* Number of packets and mult are zero-based in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * input context, but we want one-based for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * interval table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) bw_info->mult = CTX_TO_EP_MULT(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) le32_to_cpu(ep_ctx->ep_info)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) bw_info->num_packets = CTX_TO_MAX_BURST(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) le32_to_cpu(ep_ctx->ep_info2)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) bw_info->max_packet_size = MAX_PACKET_DECODED(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) le32_to_cpu(ep_ctx->ep_info2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) bw_info->type = ep_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) le32_to_cpu(ep_ctx->tx_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * Useful when you want to change one particular aspect of the endpoint and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * issue a configure endpoint command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) void xhci_endpoint_copy(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct xhci_container_ctx *in_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) struct xhci_container_ctx *out_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) unsigned int ep_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct xhci_ep_ctx *out_ep_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct xhci_ep_ctx *in_ep_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) in_ep_ctx->ep_info = out_ep_ctx->ep_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) in_ep_ctx->deq = out_ep_ctx->deq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) in_ep_ctx->tx_info = out_ep_ctx->tx_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (xhci->quirks & XHCI_MTK_HOST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * Useful when you want to change one particular aspect of the slot and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) * issue a configure endpoint command. Only the context entries field matters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * but we'll copy the whole thing anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) void xhci_slot_copy(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) struct xhci_container_ctx *in_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct xhci_container_ctx *out_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct xhci_slot_ctx *in_slot_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) struct xhci_slot_ctx *out_slot_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) in_slot_ctx->dev_info = out_slot_ctx->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) in_slot_ctx->tt_info = out_slot_ctx->tt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) in_slot_ctx->dev_state = out_slot_ctx->dev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) "Allocating %d scratchpad buffers", num_sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (!num_sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (!xhci->scratchpad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) goto fail_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) num_sp * sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) &xhci->scratchpad->sp_dma, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (!xhci->scratchpad->sp_array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) goto fail_sp2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) flags, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (!xhci->scratchpad->sp_buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) goto fail_sp3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
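/* DCBAA entry 0 holds the scratchpad buffer array pointer (xHCI 4.20) */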
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) for (i = 0; i < num_sp; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) goto fail_sp4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) xhci->scratchpad->sp_array[i] = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) xhci->scratchpad->sp_buffers[i] = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) fail_sp4:
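/* i indexes the allocation that failed; free the earlier buffers */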
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) for (i = i - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) dma_free_coherent(dev, xhci->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) xhci->scratchpad->sp_buffers[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) xhci->scratchpad->sp_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) kfree(xhci->scratchpad->sp_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) fail_sp3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) dma_free_coherent(dev, num_sp * sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) xhci->scratchpad->sp_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) xhci->scratchpad->sp_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) fail_sp2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) kfree(xhci->scratchpad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) xhci->scratchpad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) fail_sp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static void scratchpad_free(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) int num_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (!xhci->scratchpad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) for (i = 0; i < num_sp; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) dma_free_coherent(dev, xhci->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) xhci->scratchpad->sp_buffers[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) xhci->scratchpad->sp_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) kfree(xhci->scratchpad->sp_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) dma_free_coherent(dev, num_sp * sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) xhci->scratchpad->sp_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) xhci->scratchpad->sp_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) kfree(xhci->scratchpad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) xhci->scratchpad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) bool allocate_completion, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct xhci_command *command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (!command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (allocate_completion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) command->completion =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) kzalloc_node(sizeof(struct completion), mem_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (!command->completion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) kfree(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) init_completion(command->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) command->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) INIT_LIST_HEAD(&command->cmd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) return command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) EXPORT_SYMBOL_GPL(xhci_alloc_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) bool allocate_completion, gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) struct xhci_command *command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (!command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (!command->in_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) kfree(command->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) kfree(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) void xhci_urb_free_priv(struct urb_priv *urb_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) kfree(urb_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) void xhci_free_command(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct xhci_command *command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) xhci_free_container_ctx(xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) command->in_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) kfree(command->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) kfree(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) EXPORT_SYMBOL_GPL(xhci_free_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) int xhci_alloc_erst(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) struct xhci_ring *evt_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) struct xhci_erst *erst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct xhci_segment *seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) struct xhci_erst_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) size, &erst->erst_dma_addr, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (!erst->entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) erst->num_entries = evt_ring->num_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
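/* Each ERST entry describes one event ring segment: its base address
 * and its size in TRBs (xHCI 6.5).
 */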
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) seg = evt_ring->first_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) for (val = 0; val < evt_ring->num_segs; val++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) entry = &erst->entries[val];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) entry->seg_addr = cpu_to_le64(seg->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) entry->rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) seg = seg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) EXPORT_SYMBOL_GPL(xhci_alloc_erst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (erst->entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) dma_free_coherent(dev, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) erst->entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) erst->erst_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) erst->entries = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) EXPORT_SYMBOL_GPL(xhci_free_erst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct xhci_hcd *xhci, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (ops && ops->alloc_dcbaa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return ops->alloc_dcbaa(xhci, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (ops && ops->free_dcbaa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) ops->free_dcbaa(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) void xhci_mem_cleanup(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) int i, j, num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) cancel_delayed_work_sync(&xhci->cmd_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) xhci_free_erst(xhci, &xhci->erst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (xhci->event_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) xhci_ring_free(xhci, xhci->event_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) xhci->event_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (xhci->lpm_command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) xhci_free_command(xhci, xhci->lpm_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) xhci->lpm_command = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (xhci->cmd_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) xhci_ring_free(xhci, xhci->cmd_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) xhci->cmd_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) xhci_cleanup_command_queue(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) for (i = 0; i < num_ports && xhci->rh_bw; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) struct list_head *ep = &bwt->interval_bw[j].endpoints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) while (!list_empty(ep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) list_del_init(ep->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
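/* Free leaf devices first: a device behind a hub may still reference
 * the hub's TT bandwidth info, so the hub must be freed last.
 */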
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) xhci_free_virt_devices_depth_first(xhci, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) dma_pool_destroy(xhci->segment_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) xhci->segment_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) dma_pool_destroy(xhci->device_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) xhci->device_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) dma_pool_destroy(xhci->small_streams_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) xhci->small_streams_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) "Freed small stream array pool");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) dma_pool_destroy(xhci->medium_streams_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) xhci->medium_streams_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) "Freed medium stream array pool");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) xhci_vendor_free_dcbaa(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (xhci->dcbaa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) dma_free_coherent(dev, sizeof(*xhci->dcbaa),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) xhci->dcbaa, xhci->dcbaa->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) xhci->dcbaa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) scratchpad_free(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (!xhci->rh_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) goto no_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) for (i = 0; i < num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct xhci_tt_bw_info *tt, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) list_del(&tt->tt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) kfree(tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) no_bw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) xhci->cmd_ring_reserved_trbs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) xhci->usb2_rhub.num_ports = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) xhci->usb3_rhub.num_ports = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) xhci->num_active_eps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) kfree(xhci->usb2_rhub.ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) kfree(xhci->usb3_rhub.ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) kfree(xhci->hw_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) kfree(xhci->rh_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) kfree(xhci->ext_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) for (i = 0; i < xhci->num_port_caps; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) kfree(xhci->port_caps[i].psi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) kfree(xhci->port_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) xhci->num_port_caps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) xhci->usb2_rhub.ports = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) xhci->usb3_rhub.ports = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) xhci->hw_ports = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) xhci->rh_bw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) xhci->ext_caps = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) xhci->port_caps = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) xhci->page_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) xhci->page_shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) xhci->usb2_rhub.bus_state.bus_suspended = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) xhci->usb3_rhub.bus_state.bus_suspended = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct xhci_segment *input_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) union xhci_trb *start_trb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) union xhci_trb *end_trb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) dma_addr_t input_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct xhci_segment *result_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) char *test_name, int test_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) unsigned long long start_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) unsigned long long end_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct xhci_segment *seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (seg != result_seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) test_name, test_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) xhci_warn(xhci, "Tested TRB math w/ seg %p and "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) "input DMA 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) input_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) (unsigned long long) input_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) "ending TRB %p (0x%llx DMA)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) start_trb, start_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) end_trb, end_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) xhci_warn(xhci, "Expected seg %p, got seg %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) result_seg, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) dma_addr_t input_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct xhci_segment *result_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) } simple_test_vector[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) /* A zeroed DMA field should fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) { 0, NULL },
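/* sizeof(union xhci_trb) is 16 bytes, hence the 16-byte offsets below */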
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) /* One TRB before the ring start should fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) { xhci->event_ring->first_seg->dma - 16, NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) /* One byte before the ring start should fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) { xhci->event_ring->first_seg->dma - 1, NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) /* Starting TRB should succeed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* Ending TRB should succeed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) xhci->event_ring->first_seg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /* One byte after the ring end should fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) /* One TRB after the ring end should fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* An address of all ones should fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) { (dma_addr_t) (~0), NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) struct xhci_segment *input_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) union xhci_trb *start_trb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) union xhci_trb *end_trb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) dma_addr_t input_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) struct xhci_segment *result_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) } complex_test_vector[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* Test feeding a valid DMA address from a different ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) .start_trb = xhci->event_ring->first_seg->trbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) .input_dma = xhci->cmd_ring->first_seg->dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /* Test feeding a valid end TRB from a different ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) .start_trb = xhci->event_ring->first_seg->trbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) .input_dma = xhci->cmd_ring->first_seg->dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) /* Test feeding a valid start and end TRB from a different ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) .start_trb = xhci->cmd_ring->first_seg->trbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) .input_dma = xhci->cmd_ring->first_seg->dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) /* TRB in this ring, but after this TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) .start_trb = &xhci->event_ring->first_seg->trbs[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) .end_trb = &xhci->event_ring->first_seg->trbs[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) .input_dma = xhci->event_ring->first_seg->dma + 4*16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) /* TRB in this ring, but before this TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) .start_trb = &xhci->event_ring->first_seg->trbs[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) .end_trb = &xhci->event_ring->first_seg->trbs[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) .input_dma = xhci->event_ring->first_seg->dma + 2*16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /* TRB in this ring, but after this wrapped TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) .end_trb = &xhci->event_ring->first_seg->trbs[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) .input_dma = xhci->event_ring->first_seg->dma + 2*16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /* TRB in this ring, but before this wrapped TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) .end_trb = &xhci->event_ring->first_seg->trbs[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) /* TRB not in this ring, and we have a wrapped TD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) { .input_seg = xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) .end_trb = &xhci->event_ring->first_seg->trbs[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) .result_seg = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) unsigned int num_tests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) num_tests = ARRAY_SIZE(simple_test_vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) for (i = 0; i < num_tests; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) ret = xhci_test_trb_in_td(xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) xhci->event_ring->first_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) xhci->event_ring->first_seg->trbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) simple_test_vector[i].input_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) simple_test_vector[i].result_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) "Simple", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) num_tests = ARRAY_SIZE(complex_test_vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) for (i = 0; i < num_tests; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) ret = xhci_test_trb_in_td(xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) complex_test_vector[i].input_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) complex_test_vector[i].start_trb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) complex_test_vector[i].end_trb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) complex_test_vector[i].input_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) complex_test_vector[i].result_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) "Complex", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) xhci_dbg(xhci, "TRB math tests passed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) u64 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) dma_addr_t deq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) xhci->event_ring->dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (deq == 0 && !in_interrupt())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) xhci_warn(xhci, "WARN something wrong with SW event ring "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) "dequeue ptr.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) /* Update HC event ring dequeue pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) temp &= ERST_PTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) /* Don't clear the EHB bit (which is RW1C) because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) * there might be more events to service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) temp &= ~ERST_EHB;
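/* (EHB is RW1C, so writing it back as 0 leaves it set in the register) */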
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) "// Write event ring dequeue pointer, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) "preserving EHB bit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) &xhci->ir_set->erst_dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) __le32 __iomem *addr, int max_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) u32 temp, port_offset, port_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) u8 major_revision, minor_revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) struct xhci_hub *rhub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) struct xhci_port_cap *port_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) temp = readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) major_revision = XHCI_EXT_PORT_MAJOR(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) minor_revision = XHCI_EXT_PORT_MINOR(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (major_revision == 0x03) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) rhub = &xhci->usb3_rhub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * Some hosts incorrectly use sub-minor version for minor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * for bcdUSB 0x310). Since there is no USB release with sub-minor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * version 0x301 to 0x309, we can assume that they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * incorrect and fix it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (minor_revision > 0x00 && minor_revision < 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) minor_revision <<= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) } else if (major_revision <= 0x02) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) rhub = &xhci->usb2_rhub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) xhci_warn(xhci, "Ignoring unknown port speed, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) "Ext Cap %p, revision = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) addr, major_revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /* Ignoring port protocol we can't understand. FIXME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (rhub->min_rev < minor_revision)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) rhub->min_rev = minor_revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* Port offset and count in the third dword, see section 7.2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) temp = readl(addr + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) port_offset = XHCI_EXT_PORT_OFF(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) port_count = XHCI_EXT_PORT_COUNT(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) "Ext Cap %p, port offset = %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) "count = %u, revision = 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) addr, port_offset, port_count, major_revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) /* Port count includes the current port offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) /* WTF? "Valid values are ‘1’ to MaxPorts" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (xhci->num_port_caps >= max_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) port_cap = &xhci->port_caps[xhci->num_port_caps++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) port_cap->maj_rev = major_revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) port_cap->min_rev = minor_revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (port_cap->psi_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) port_cap->psi = kcalloc_node(port_cap->psi_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) sizeof(*port_cap->psi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) GFP_KERNEL, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (!port_cap->psi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) port_cap->psi_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) port_cap->psi_uid_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) for (i = 0; i < port_cap->psi_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) port_cap->psi[i] = readl(addr + 4 + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) /* count unique ID values; two consecutive entries can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * have the same ID if the link is asymmetric
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) port_cap->psi_uid_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) XHCI_EXT_PORT_PLT(port_cap->psi[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) XHCI_EXT_PORT_PFD(port_cap->psi[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) XHCI_EXT_PORT_LP(port_cap->psi[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
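	/*
	 * A sketch of one Protocol Speed ID dword using the field layout the
	 * macros above assume (example value, not read from hardware): a
	 * 5 Gb/s symmetric full-duplex PSI of 0x00050134 decodes as
	 *
	 *	PSIV = 4  (bits 3:0,   speed ID value)
	 *	PSIE = 3  (bits 5:4,   exponent, 3 = Gb/s)
	 *	PLT  = 0  (bits 7:6,   symmetric link)
	 *	PFD  = 1  (bit 8,      full-duplex)
	 *	LP   = 0  (bits 15:14, link protocol)
	 *	PSIM = 5  (bits 31:16, mantissa)
	 *
	 * i.e. 5 * 10^9 bits per second.
	 */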
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /* cache usb2 port capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) xhci->ext_caps[xhci->num_ext_caps++] = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) (temp & XHCI_HLC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) "xHCI 1.0: support USB2 hardware lpm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) xhci->hw_lpm_support = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) port_offset--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) for (i = port_offset; i < (port_offset + port_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct xhci_port *hw_port = &xhci->hw_ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) /* Duplicate entry. Ignore the port if the revisions differ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (hw_port->rhub) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n",
				  addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
				  hw_port->rhub->maj_rev, major_revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) /* Only adjust the roothub port counts if we haven't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * found a similar duplicate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (hw_port->rhub != rhub &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) hw_port->hcd_portnum != DUPLICATE_ENTRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) hw_port->rhub->num_ports--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) hw_port->hcd_portnum = DUPLICATE_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) hw_port->rhub = rhub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) hw_port->port_cap = port_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) rhub->num_ports++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /* FIXME: Should we disable ports not in the Extended Capabilities? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) struct xhci_hub *rhub, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) int port_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (!rhub->num_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) flags, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (!rhub->ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (xhci->hw_ports[i].rhub != rhub ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) xhci->hw_ports[i].hcd_portnum = port_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) rhub->ports[port_index] = &xhci->hw_ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) port_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (port_index == rhub->num_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
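/*
 * A minimal sketch of the resulting mapping, assuming a controller that
 * advertises two USB2 ports followed by one USB3 port:
 *
 *	hw_ports[0].rhub = &usb2_rhub;	hcd_portnum = 0
 *	hw_ports[1].rhub = &usb2_rhub;	hcd_portnum = 1
 *	hw_ports[2].rhub = &usb3_rhub;	hcd_portnum = 0
 *
 *	usb2_rhub.ports = { &hw_ports[0], &hw_ports[1] }
 *	usb3_rhub.ports = { &hw_ports[2] }
 *
 * Each roothub sees a dense zero-based port array while hw_portnum keeps the
 * controller-global index.
 */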
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * specify what speeds each port is supposed to be. We can't count on the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * speed bits in the PORTSC register being correct until a device is connected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) * but we need to set up the two fake roothubs with the correct number of USB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * 3.0 and USB 2.0 ports at host controller initialization time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) unsigned int num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) int cap_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) u32 cap_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) flags, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (!xhci->hw_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) for (i = 0; i < num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) NUM_PORT_REGS * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) xhci->hw_ports[i].hw_portnum = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (!xhci->rh_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) for (i = 0; i < num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) struct xhci_interval_bw_table *bw_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) bw_table = &xhci->rh_bw[i].bw_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) for (j = 0; j < XHCI_MAX_INTERVAL; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) base = &xhci->cap_regs->hc_capbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (!cap_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) offset = cap_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) /* count extended protocol capability entries for later caching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) while (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) cap_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) offset = xhci_find_next_ext_cap(base, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) XHCI_EXT_CAPS_PROTOCOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
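	/*
	 * Example with a hypothetical layout: a controller exposing one USB2
	 * and one USB3 Supported Protocol capability makes this loop run
	 * twice, so cap_count = 2 and the ext_caps/port_caps arrays below are
	 * sized for two entries before the second pass parses them.
	 */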
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) flags, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (!xhci->ext_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) flags, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (!xhci->port_caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) offset = cap_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) while (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) num_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) offset = xhci_find_next_ext_cap(base, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) XHCI_EXT_CAPS_PROTOCOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) xhci_warn(xhci, "No ports on the roothubs?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) "Found %u USB 2.0 ports and %u USB 3.0 ports.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) /* Place limits on the number of roothub ports so that the hub
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) * descriptors aren't longer than the USB core will allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) "Limiting USB 3.0 roothub ports to %u.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) USB_SS_MAXPORTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) "Limiting USB 2.0 roothub ports to %u.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) USB_MAXCHILDREN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * Not sure how the USB core will handle a hub with no ports...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) unsigned int val, val2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) u64 val_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) u32 page_size, temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) INIT_LIST_HEAD(&xhci->cmd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) /* init command timeout work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) init_completion(&xhci->cmd_ring_stop_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) page_size = readl(&xhci->op_regs->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) "Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if (page_size & 0x1)
			break;
		page_size >>= 1;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (i < 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) "Supported page size of %iK", (1 << (i+12)) / 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) xhci_warn(xhci, "WARN: no supported page size\n");
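	/*
	 * Worked example (hypothetical register value): PAGESIZE = 0x1 sets
	 * only bit 0, so the loop breaks at i = 0 and the supported page size
	 * is 1 << (0 + 12) = 4096 bytes (4K). The loop reports the smallest
	 * supported size, so 0x3 would also yield 4K.
	 */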
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) /* Use 4K pages, since that's common and the minimum the HC supports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) xhci->page_shift = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) xhci->page_size = 1 << xhci->page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) "HCD page size set to %iK", xhci->page_size / 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) * Program the Number of Device Slots Enabled field in the CONFIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) * register with the max value of slots the HC can handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) "// xHC can handle at most %d device slots.", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) val2 = readl(&xhci->op_regs->config_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) val |= (val2 & ~HCS_SLOTS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) "// Setting Max device slots reg = 0x%x.", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) writel(val, &xhci->op_regs->config_reg);
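	/*
	 * Example with made-up values: if HCSPARAMS1 reports MaxSlots = 32,
	 * val starts as 0x20; the read-modify-write above preserves the
	 * CONFIG bits outside HCS_SLOTS_MASK and programs 32 into the Number
	 * of Device Slots Enabled field.
	 */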
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * xHCI section 5.4.6 - doorbell array must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * "physically contiguous and 64-byte (cache line) aligned".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (!xhci->dcbaa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) if (!xhci->dcbaa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) xhci->dcbaa->dma = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) "// Device context base array address = 0x%llx (DMA), %p (virt)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) * Initialize the ring segment pool. The ring must be a contiguous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) * structure comprised of TRBs. The TRBs must be 16 byte aligned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) * however, the command ring segment needs 64-byte aligned segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) * and our use of dma addresses in the trb_address_map radix tree needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) /* See Table 46 and Note on Figure 55 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 2112, 64, xhci->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) if (!xhci->segment_pool || !xhci->device_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* Linear stream context arrays don't have any boundary restrictions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) * and only need to be 16-byte aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) xhci->small_streams_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) dma_pool_create("xHCI 256 byte stream ctx arrays",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) xhci->medium_streams_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) dma_pool_create("xHCI 1KB stream ctx arrays",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) * will be allocated with dma_alloc_coherent()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
	/* Set up the command ring to have one segment for now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (!xhci->cmd_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) "Allocated command ring at %p", xhci->cmd_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) (unsigned long long)xhci->cmd_ring->first_seg->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) /* Set the address in the Command Ring Control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) xhci->cmd_ring->cycle_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) "// Setting command ring address to 0x%016llx", val_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
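	/*
	 * Sketch with a made-up DMA address: for first_seg->dma = 0x7ff80000
	 * and cycle_state = 1, assuming the reserved low bits read back as
	 * zero, the value written is 0x7ff80001 - the 64-byte aligned ring
	 * pointer with the consumer cycle state in bit 0.
	 */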
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) if (!xhci->lpm_command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) /* Reserve one command ring TRB for disabling LPM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) * Since the USB core grabs the shared usb_bus bandwidth mutex before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * disabling LPM, we only need to reserve one TRB for all devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) xhci->cmd_ring_reserved_trbs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) val = readl(&xhci->cap_regs->db_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Doorbell array is located at offset 0x%x from cap regs base addr",
		       val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) xhci->dba = (void __iomem *) xhci->cap_regs + val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) /* Set ir_set to interrupt register set 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) xhci->ir_set = &xhci->run_regs->ir_set[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) * Event ring setup: Allocate a normal ring, but also setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) * the event ring segment table (ERST). Section 4.9.3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 0, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (!xhci->event_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (xhci_check_trb_in_td_math(xhci) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) /* set ERST count with the number of entries in the segment table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) val = readl(&xhci->ir_set->erst_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) val &= ERST_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) val |= ERST_NUM_SEGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) "// Write ERST size = %i to ir_set 0 (some bits preserved)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) writel(val, &xhci->ir_set->erst_size);
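	/*
	 * e.g. if ERST_NUM_SEGS is 1 (a single event ring segment), this
	 * read-modify-write keeps the bits above ERST_SIZE_MASK intact and
	 * tells the controller the segment table holds one entry.
	 */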
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) "// Set ERST entries to point to event ring.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) /* set the segment table base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) "// Set ERST base address for ir_set 0 = 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) (unsigned long long)xhci->erst.erst_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) val_64 &= ERST_PTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) /* Set the event ring dequeue address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) xhci_set_hc_event_deq(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) xhci_dbg_trace(xhci, trace_xhci_dbg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) "Wrote ERST address to ir_set 0.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) * XXX: Might need to set the Interrupter Moderation Register to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) * something other than the default (~1ms minimum between interrupts).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) * See section 5.5.1.2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) for (i = 0; i < MAX_HC_SLOTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) xhci->devs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) for (i = 0; i < USB_MAXCHILDREN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) xhci->usb2_rhub.bus_state.resume_done[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) xhci->usb3_rhub.bus_state.resume_done[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) /* Only the USB 2.0 completions will ever be used. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (scratchpad_alloc(xhci, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (xhci_setup_port_arrays(xhci, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) /* Enable USB 3.0 device notifications for function remote wake, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * is necessary for allowing USB 3.0 devices to do remote wakeup from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) * U3 (device suspend).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) temp = readl(&xhci->op_regs->dev_notification);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) temp &= ~DEV_NOTE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) temp |= DEV_NOTE_FWAKE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) writel(temp, &xhci->op_regs->dev_notification);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) xhci_halt(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) xhci_mem_cleanup(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
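/*
 * A rough sketch of where this sits in the init path (simplified): the bus
 * glue (e.g. xhci-pci or xhci-plat) registers the HCD, usb_add_hcd() invokes
 * the hc_driver ->reset callback, and xhci_init() then calls
 * xhci_mem_init(xhci, GFP_KERNEL) before the controller is started. On any
 * failure, everything allocated here is unwound via xhci_mem_cleanup().
 */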