Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

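/*
 * Free a DbC container context and its DMA-coherent backing memory.
 * Tolerates a NULL ctx so it can be called unconditionally on cleanup paths.
 */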
static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg && ring->first_seg->trbs) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

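/*
 * Write the four USB string descriptors (string0/LANGID, manufacturer,
 * product, serial) into the DbC string table. The return value packs the
 * length of each descriptor into one byte: string0 in the low byte, then
 * manufacturer, product and serial, matching the info context layout.
 */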
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0: */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}

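/*
 * Fill in the three DbC contexts (info, bulk-out EP, bulk-in EP) and point
 * the DbC context-pointer and device-info registers at them. Both endpoints
 * use a 1024-byte max packet; each dequeue pointer starts at the ring's
 * enqueue position with the current cycle state in bit 0.
 */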
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

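	/*
	 * Note: writel() itself converts from CPU to little-endian byte
	 * order, so the cpu_to_le32() on these values is a no-op on
	 * little-endian hosts (and would double-swap on big-endian ones).
	 */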
	dev_info = cpu_to_le32((DBC_VENDOR_ID << 16) | DBC_PROTOCOL);
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = cpu_to_le32((DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID);
	writel(dev_info, &dbc->regs->devinfo2);
}

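/*
 * Complete a request: unlink it from the pending list, unmap its buffer and
 * invoke its completion callback. dbc->lock is dropped around the callback,
 * as the __releases/__acquires annotations document.
 */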
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

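/*
 * Abort one pending request by rewriting its TRB as a no-op (preserving
 * only the cycle bit) and completing it with -ESHUTDOWN.
 */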
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

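/*
 * Allocate a transfer request for the given bulk endpoint direction.
 * Returns NULL on an invalid direction, a missing DbC, or allocation
 * failure.
 */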
struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request	*req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

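/*
 * Write one TRB at the ring's enqueue position and advance the enqueue
 * pointer, wrapping through the link TRB (and toggling the cycle state)
 * at the end of the single segment.
 */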
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

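/*
 * Queue a single Normal TRB (with IOC set) for the request's buffer. The
 * TRB is first written with an inverted cycle bit; after a write barrier
 * the cycle bit is flipped to hand the TRB to the controller, and the
 * endpoint's doorbell is rung.
 */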
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = req->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

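/*
 * Map the request buffer for DMA and put it on the hardware ring; on
 * success the request joins the endpoint's pending list. Caller must hold
 * dbc->lock.
 */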
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int			ret;
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;
	struct dbc_ep		*dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = req->dbc;
	int			ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

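/*
 * Bind one dbc_ep structure to its transfer ring and reset its
 * pending-request list; direction selects the IN or OUT endpoint.
 */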
static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep		*dep;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

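/*
 * Allocate a single-entry event ring segment table and point it at the
 * event ring's one segment.
 */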
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
		    struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xhci 7.6.9: all three contexts (info, ep-out and ep-in), 64 bytes each */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

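/*
 * Allocate a one-segment ring. For transfer rings the last TRB is a link
 * TRB that points back to the start of the segment with the toggle-cycle
 * bit set; the event ring needs no link TRB.
 */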
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

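/*
 * Allocate all DbC data structures (event and transfer rings, ERST,
 * contexts, string table), program the event ring registers, and leave the
 * DbC in DS_INITIALIZED. On failure everything allocated so far is unwound.
 */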
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct device		*dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

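/*
 * Enable the debug capability: clear the control register and wait for the
 * enable bit to read back as zero, set up memory, then set the DbC enable
 * and port enable bits and wait for the controller to acknowledge. Caller
 * must hold dbc->lock.
 */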
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	u32			ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
	case DS_STALLED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(dbc);
		pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
	}
}

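/*
 * Log the port status change bits and acknowledge them, except the port
 * reset change bit, which the DS_CONFIGURED state handling clears.
 */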
static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32			portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

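/*
 * Handle a transfer event: translate the completion code into a status,
 * find the pending request whose TRB address matches the event, update its
 * actual length and give it back.
 */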
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(dbc) : get_in_ep(dbc);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

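/*
 * Run the DbC state machine and then drain the event ring. Returns an
 * evtreturn code so the caller can signal connect/disconnect to the
 * function driver or stop polling on error.
 */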
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	dma_addr_t		deq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	struct dbc_ep		*dep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	union xhci_trb		*evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	u32			ctrl, portsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	bool			update_erdp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	/* DbC state machine: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	switch (dbc->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	case DS_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	case DS_INITIALIZED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		return EVT_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	case DS_ENABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		portsc = readl(&dbc->regs->portsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		if (portsc & DBC_PORTSC_CONN_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			dbc->state = DS_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			dev_info(dbc->dev, "DbC connected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		return EVT_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	case DS_CONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		ctrl = readl(&dbc->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		if (ctrl & DBC_CTRL_DBC_RUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			dbc->state = DS_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			dev_info(dbc->dev, "DbC configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			portsc = readl(&dbc->regs->portsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			writel(portsc, &dbc->regs->portsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			return EVT_GSER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		return EVT_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	case DS_CONFIGURED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		/* Handle cable unplug event: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		portsc = readl(&dbc->regs->portsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			dev_info(dbc->dev, "DbC cable unplugged\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			dbc->state = DS_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			xhci_dbc_flush_requests(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			return EVT_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		/* Handle debug port reset event: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		if (portsc & DBC_PORTSC_RESET_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			dev_info(dbc->dev, "DbC port reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			writel(portsc, &dbc->regs->portsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			dbc->state = DS_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			xhci_dbc_flush_requests(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			return EVT_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		/* Handle endpoint stall event: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		ctrl = readl(&dbc->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			dev_info(dbc->dev, "DbC endpoint stall\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			dbc->state = DS_STALLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			if (ctrl & DBC_CTRL_HALT_IN_TR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				dep = get_in_ep(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				xhci_dbc_flush_endpoint_requests(dep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 				dep = get_out_ep(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				xhci_dbc_flush_endpoint_requests(dep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			return EVT_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		/* Clear the DbC run change bit (write-1-to-clear): */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			writel(ctrl, &dbc->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			ctrl = readl(&dbc->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	case DS_STALLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		ctrl = readl(&dbc->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		    (ctrl & DBC_CTRL_DBC_RUN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			dbc->state = DS_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		return EVT_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
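	/*
	 * Software owns an event TRB only while its cycle bit matches the
	 * ring's cycle state; the controller toggles the bit each time it
	 * wraps the ring, which is what the dequeue loop below keys off.
	 */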
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/* Handle the events in the event ring: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	evt = dbc->ring_evt->dequeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			dbc->ring_evt->cycle_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		 * Add a barrier between reading the cycle flag and any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		 * reads of the event's flags/data below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		case TRB_TYPE(TRB_PORT_STATUS):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			dbc_handle_port_status(dbc, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		case TRB_TYPE(TRB_TRANSFER):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 			dbc_handle_xfer_event(dbc, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		inc_evt_deq(dbc->ring_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		evt = dbc->ring_evt->dequeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		update_erdp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	/* Update event ring dequeue pointer: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (update_erdp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 					   dbc->ring_evt->dequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		lo_hi_writeq(deq, &dbc->regs->erdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return EVT_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
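/*
 * Delayed-work handler.  DbC has no dedicated interrupt in this driver, so
 * events are polled: the state machine and event ring are processed under
 * dbc->lock, the function driver's configure/disconnect callbacks run
 * outside of it, and unless event handling failed the work re-arms itself
 * one jiffy later.
 */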
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) static void xhci_dbc_handle_events(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	enum evtreturn		evtr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct xhci_dbc		*dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	spin_lock_irqsave(&dbc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	evtr = xhci_dbc_do_handle_events(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	spin_unlock_irqrestore(&dbc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	switch (evtr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	case EVT_GSER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		if (dbc->driver->configure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			dbc->driver->configure(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	case EVT_DISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		if (dbc->driver->disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			dbc->driver->disconnect(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	case EVT_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		dev_info(dbc->dev, "stop handling DbC events\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	mod_delayed_work(system_wq, &dbc->event_work, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) }
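
/*
 * The configure/disconnect hooks above are supplied by the function driver
 * through struct dbc_driver.  As a sketch, the TTY glue (xhci-dbgtty.c in
 * this tree) registers roughly the following; the exact names are taken
 * from that file and may differ in other kernel versions:
 *
 *	static const struct dbc_driver dbc_driver = {
 *		.configure	= xhci_dbc_tty_register_device,
 *		.disconnect	= xhci_dbc_tty_unregister_device,
 *	};
 *
 * and points dbc->driver at it from xhci_dbc_tty_probe().
 */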
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	spin_lock_irqsave(&xhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	kfree(xhci->dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	xhci->dbc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	spin_unlock_irqrestore(&xhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) }
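/*
 * Locate the xHCI Debug Capability in the controller's extended-capability
 * list and allocate the per-controller xhci_dbc structure.  Returns -ENODEV
 * if the controller has no DbC, or -EBUSY if DbC is already claimed.
 */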
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) static int xhci_do_dbc_init(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	u32			reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	struct xhci_dbc		*dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	void __iomem		*base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	int			dbc_cap_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	base = &xhci->cap_regs->hc_capbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (!dbc_cap_offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (!dbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	dbc->regs = base + dbc_cap_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	/* If DbC is already enabled, it is in use elsewhere; leave it alone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	reg = readl(&dbc->regs->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (reg & DBC_CTRL_DBC_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		kfree(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	spin_lock_irqsave(&xhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (xhci->dbc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		spin_unlock_irqrestore(&xhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		kfree(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	xhci->dbc = dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	spin_unlock_irqrestore(&xhci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	dbc->xhci = xhci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	spin_lock_init(&dbc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
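/* sysfs "dbc" read handler: report the current DbC state as a string. */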
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) static ssize_t dbc_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	const char		*p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct xhci_dbc		*dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	struct xhci_hcd		*xhci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	dbc = xhci->dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	switch (dbc->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	case DS_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		p = "disabled";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	case DS_INITIALIZED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		p = "initialized";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	case DS_ENABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		p = "enabled";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	case DS_CONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		p = "connected";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	case DS_CONFIGURED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		p = "configured";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	case DS_STALLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		p = "stalled";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		p = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	return sprintf(buf, "%s\n", p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
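/* sysfs "dbc" write handler: accepts "enable" or "disable". */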
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static ssize_t dbc_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			 struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			 const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	struct xhci_hcd		*xhci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct xhci_dbc		*dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	dbc = xhci->dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (!strncmp(buf, "enable", 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		xhci_dbc_start(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	else if (!strncmp(buf, "disable", 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		xhci_dbc_stop(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static DEVICE_ATTR_RW(dbc);
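
/*
 * Illustrative usage from userspace (the sysfs path depends on how the
 * controller is attached; the PCI address below is an assumption for the
 * example, not something this file dictates):
 *
 *	# echo enable  > /sys/bus/pci/devices/0000:00:14.0/dbc
 *	# cat /sys/bus/pci/devices/0000:00:14.0/dbc
 *	enabled
 *	# echo disable > /sys/bus/pci/devices/0000:00:14.0/dbc
 */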
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int xhci_dbc_init(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int			ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	struct device		*dev = xhci_to_hcd(xhci)->self.controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	ret = xhci_do_dbc_init(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		goto init_err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	ret = xhci_dbc_tty_probe(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		goto init_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	ret = device_create_file(dev, &dev_attr_dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		goto init_err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) init_err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	xhci_dbc_tty_remove(xhci->dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) init_err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	xhci_do_dbc_exit(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) init_err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
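/*
 * Tear down in reverse order of xhci_dbc_init(): remove the sysfs
 * attribute and the TTY function driver, then stop DbC and free the
 * xhci_dbc structure.
 */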
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) void xhci_dbc_exit(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct device		*dev = xhci_to_hcd(xhci)->self.controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (!xhci->dbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	device_remove_file(dev, &dev_attr_dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	xhci_dbc_tty_remove(xhci->dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	xhci_dbc_stop(xhci->dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	xhci_do_dbc_exit(xhci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) #ifdef CONFIG_PM
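/*
 * On suspend, remember whether DbC was live (DS_CONFIGURED) so that resume
 * can bring it back up, then stop it unconditionally.
 */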
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int xhci_dbc_suspend(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	struct xhci_dbc		*dbc = xhci->dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	if (!dbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (dbc->state == DS_CONFIGURED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		dbc->resume_required = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	xhci_dbc_stop(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
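/* Restart DbC on resume only if it was running when we suspended. */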
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) int xhci_dbc_resume(struct xhci_hcd *xhci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	int			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct xhci_dbc		*dbc = xhci->dbc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	if (!dbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	if (dbc->resume_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		dbc->resume_required = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		xhci_dbc_start(dbc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) #endif /* CONFIG_PM */