Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

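/*
 * The periodic FrameList has FRLISTEN_64_SIZE (64) entries, a power of two,
 * so the frame-list index is simply the low bits of the frame number, e.g.
 * frame 0x47 & 63 == 7.
 */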
static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

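/*
 * The descriptor list is treated as a ring: dwc2_desclist_idx_inc() and
 * dwc2_desclist_idx_dec() step an index forward or backward with
 * wraparound. Both ring sizes are powers of two, so "& (size - 1)" is
 * equivalent to "% size"; with the usual dwc2 sizes
 * (MAX_DMA_DESC_NUM_GENERIC = 64, MAX_DMA_DESC_NUM_HS_ISOC = 256), e.g.
 * index 62 incremented by 4 wraps to 2 on a generic list.
 */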
static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

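/*
 * For a high-speed device qh->host_interval is expressed in microframes
 * (8 per frame), so the per-frame increment is the interval divided by 8,
 * rounded up; for full/low speed the interval is already in frames.
 */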
static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
	       (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
}

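/*
 * The descriptor list is allocated from a dedicated kmem cache and is
 * streaming-DMA mapped, which is why every descriptor update in this file
 * is followed by dma_sync_single_for_device() before the controller may
 * read it.
 */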
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
	    qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
						dwc2_max_desc_num(qh);

	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
	if (!qh->desc_list)
		return -ENOMEM;

	qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
					   qh->desc_list_sz,
					   DMA_TO_DEVICE);

	qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
	if (!qh->n_bytes) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz,
				 DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
	    qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	if (qh->desc_list) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz, DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}

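/*
 * Each of the 64 frame-list entries is a bitmask of host channels to be
 * serviced in that frame (bit n corresponds to host channel n); see
 * dwc2_update_frame_list().
 */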
static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
	if (!hsotg->frame_list)
		return -ENOMEM;

	hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
					       hsotg->frame_list_sz,
					       DMA_TO_DEVICE);

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
			 hsotg->frame_list_sz, DMA_FROM_DEVICE);

	kfree(hsotg->frame_list);
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

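/*
 * Point the controller at the frame list via HFLBADDR, select the 64-entry
 * list size in the HCFG FRLISTEN field and set HCFG_PERSCHEDENA to start
 * periodic scheduling.
 */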
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	dwc2_writel(hsotg, hcfg, HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	dwc2_writel(hsotg, hcfg, HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on the
 * endpoint servicing period
 */
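/*
 * Example: a full-speed endpoint with host_interval 8 whose
 * next_active_frame maps to frame-list index 3 gets its channel bit set in
 * entries 3, 11, 19, ..., 59 of the 64-entry frame list.
 */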
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->next_active_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync the frame list since the controller will access it if a
	 * periodic channel is currently enabled.
	 */
	dma_sync_single_for_device(hsotg->dev,
				   hsotg->frame_list_dma,
				   hsotg->frame_list_sz,
				   DMA_TO_DEVICE);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->host_interval - 1) / qh->host_interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->host_interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->params.uframe_sched)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * This condition is added to prevent a double cleanup attempt in
	 * case of a device disconnect. See the channel cleanup in
	 * dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @mem_flags: Indicates the type of memory allocation
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is the
 * last periodic one, also frees the FrameList memory and disables periodic
 * scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned for some reason. This has been
	 * seen on an Isoc URB dequeue: the channel was halted but no
	 * subsequent ChHalted interrupt arrived to release it, so when we
	 * get here later from the endpoint disable routine the channel
	 * remains assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->params.uframe_sched ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

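/*
 * Map a frame-list index to a descriptor index. For HS ISOC each frame owns
 * a set of 8 descriptors (one per microframe), so with the usual
 * MAX_DMA_DESC_NUM_HS_ISOC of 256 there are 32 sets and, e.g., frame_idx 33
 * maps to set 33 & 31 == 1, i.e. descriptor index 8.
 */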
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine the starting frame for an Isochronous transfer. A few frames
 * are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/*
	 * next_active_frame is always a frame number (not a uFrame) both in
	 * FS and HS!
	 */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * The current frame is 1, the scheduled frame is 3. Since the HC
	 * always fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and it is possible (though
	 * rare) that the latest descriptor (considering rollback)
	 * corresponding to frame 2 will be serviced first. The HS case is
	 * more probable because, in fact, up to 11 uframes (16 in the code)
	 * may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter as well, to start the transfer
		 * ASAP. If half of the frame has elapsed, skip 2 frames,
		 * otherwise just 1 frame. The starting descriptor index must
		 * be 8-aligned, so if the current frame is close to complete
		 * the next one is skipped as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current one and the
		 * next. But for descriptor programming, 1 frame (descriptor)
		 * is enough; see the example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate the initial descriptor index for an isochronous transfer,
 * based on the scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used; it has simply not
	 * been removed from the source file. It is required for another
	 * possible approach: do not disable and release the channel when the
	 * ISOC session is completed, just move the QH to the inactive
	 * schedule until a new QTD arrives. On a new QTD, the QH is moved
	 * back to the 'ready' schedule, and the starting frame and therefore
	 * the starting desc_index are recalculated. In that case the channel
	 * is released only on ep_disable.
	 */

	/*
	 * Calculate the starting descriptor index. For an INTERRUPT endpoint
	 * it is always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate the initial descriptor index based on the
		 * FrameList current bitmap and the servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->next_active_frame) -
			  fr_idx_tmp) % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
								 &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

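/*
 * MAX_ISOC_XFER_SIZE_FS/HS are the largest per-descriptor isochronous
 * transfer sizes: 1023 bytes per full-speed frame, and 3072 bytes (a
 * high-bandwidth 3 x 1024 endpoint) per high-speed microframe. With
 * ISOC_URB_GIVEBACK_ASAP defined, IOC is set on the descriptor for the
 * last frame of each URB, so URBs are given back as soon as they complete
 * rather than in batches.
 */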
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set the active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC on the descriptor corresponding to the last frame of the URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);
}

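/*
 * Activate isochronous descriptors for every queued QTD, at most ntd_max
 * at a time, starting at qh->td_last and stepping through the ring by the
 * endpoint interval, so that one descriptor lands in each (micro)frame
 * slot in which the endpoint may be serviced.
 */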
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->host_interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure the current frame number has not overstepped the last
	 * scheduled descriptor. If that happens, the only way to recover is
	 * to move qh->td_last to the current frame number + 1, so that the
	 * next isoc descriptor is scheduled on frame number + 1 and not on
	 * a past frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstepped last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->host_interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
				qh->host_interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->host_interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for the last descriptor if the descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (idx *
					   sizeof(struct dwc2_dma_desc)),
					   sizeof(struct dwc2_dma_desc),
					   DMA_TO_DEVICE);
	}
#else
	/*
	 * Set the IOC bit only for one descriptor. Always try to be ahead of
	 * HW processing, i.e. on IOC generation the driver activates the next
	 * descriptor, but the core continues to process descriptors following
	 * the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". This is required even if there is only one
		 * QTD in the list, because QTDs might continue to be queued,
		 * even though only one was queued during activation.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);
#endif
}

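/*
 * Fill one non-isochronous descriptor. len is capped at
 * HOST_DMA_NBYTES_LIMIT - (max_packet - 1) so that rounding an IN transfer
 * up to a whole number of packets below cannot overflow the NBYTES field.
 */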
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
		len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for a transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (n_desc * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);

	/*
	 * Last (or only) descriptor of an IN transfer with an actual size
	 * less than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;
		chan->xfer_len -= len;
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 					struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	struct dwc2_qtd *qtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	struct dwc2_host_chan *chan = qh->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	int n_desc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		 (unsigned long)chan->xfer_dma, chan->xfer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 * Start with chan->xfer_dma initialized in assign_and_init_hc(). If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	 * the SG transfer consists of multiple URBs, this pointer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	 * re-assigned to the buffer of the currently processed QTD. For a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	 * non-SG request there is always one QTD active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		if (n_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			/* SG request - more than 1 QTD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			chan->xfer_dma = qtd->urb->dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 					qtd->urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			chan->xfer_len = qtd->urb->length -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 					qtd->urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 				 (unsigned long)chan->xfer_dma, chan->xfer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		qtd->n_desc = 0;
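		/*
		 * Build one descriptor per chunk of the transfer. Each pass
		 * first activates the previously filled descriptor
		 * (descriptor 0 excepted); descriptor 0 and the final
		 * descriptor are only activated after the whole list is
		 * built (see below), so the controller never starts on a
		 * partially initialized chain.
		 */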
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			if (n_desc > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 				dev_vdbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 					 "set A bit in desc %d (%p)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 					 n_desc - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 					 &qh->desc_list[n_desc - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 				dma_sync_single_for_device(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 							   qh->desc_list_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 					((n_desc - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 					sizeof(struct dwc2_dma_desc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 					sizeof(struct dwc2_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 					DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			dev_vdbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 				 "desc %d (%p) buf=%08x status=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 				 n_desc, &qh->desc_list[n_desc],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				 qh->desc_list[n_desc].buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 				 qh->desc_list[n_desc].status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			qtd->n_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			n_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		} while (chan->xfer_len > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			 n_desc != MAX_DMA_DESC_NUM_GENERIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		qtd->in_process = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
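	/*
	 * Finalize the list: mark the last descriptor end-of-list with
	 * interrupt-on-complete and activate it, then activate descriptor 0
	 * last so the chain only goes live once it is fully built.
	 */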
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (n_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		qh->desc_list[n_desc - 1].status |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			 n_desc - 1, &qh->desc_list[n_desc - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		dma_sync_single_for_device(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 					   qh->desc_list_dma + (n_desc - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 					   sizeof(struct dwc2_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 					   sizeof(struct dwc2_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 					   DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		if (n_desc > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			qh->desc_list[0].status |= HOST_DMA_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 				 &qh->desc_list[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			dma_sync_single_for_device(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 						   qh->desc_list_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 					sizeof(struct dwc2_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 					DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		chan->ntd = n_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * @hsotg: The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  * @qh:    The QH to init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * Return: 0 if successful, negative error code otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * For Control and Bulk endpoints, initializes descriptor list and starts the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * list then updates FrameList, marking appropriate entries as active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * For Isochronous endpoints the starting descriptor index is calculated based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  * on the scheduled frame, but only on the first transfer descriptor within a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  * session. Then the transfer is started via enabling the channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * For Isochronous endpoints the channel is not halted on XferComplete, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  * it remains assigned to the endpoint (QH) until the session is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	/* Channel is already assigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct dwc2_host_chan *chan = qh->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	u16 skip_frames = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	switch (chan->ep_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	case USB_ENDPOINT_XFER_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	case USB_ENDPOINT_XFER_BULK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		dwc2_init_non_isoc_dma_desc(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		dwc2_hc_start_transfer_ddma(hsotg, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	case USB_ENDPOINT_XFER_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		dwc2_init_non_isoc_dma_desc(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		dwc2_update_frame_list(hsotg, qh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		dwc2_hc_start_transfer_ddma(hsotg, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	case USB_ENDPOINT_XFER_ISOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		if (!qh->ntd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		if (!chan->xfer_started) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			dwc2_update_frame_list(hsotg, qh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			 * Always program the maximum number of descriptors, not the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			 * actual size; otherwise ntd would change while the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			 * is enabled, which is not recommended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			chan->ntd = dwc2_max_desc_num(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			/* Enable channel only once for ISOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			dwc2_hc_start_transfer_ddma(hsotg, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) }
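
/*
 * Illustrative call site (a sketch, not a verbatim quote of hcd.c): the
 * transaction queuing path is expected to invoke this only in descriptor
 * DMA mode, once a host channel has been bound to the QH, roughly as
 *
 *	if (hsotg->params.dma_desc_enable)
 *		dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
 */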
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) #define DWC2_CMPL_DONE		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) #define DWC2_CMPL_STOP		2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
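/*
 * Completes one isoc descriptor at index idx. Returns a negative value when
 * there is no URB or the URB was dequeued from its completion callback,
 * DWC2_CMPL_DONE when the whole qtd has completed, DWC2_CMPL_STOP when a
 * descriptor with the IOC bit set was reached, and 0 otherwise.
 */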
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 					struct dwc2_host_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 					struct dwc2_qtd *qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 					struct dwc2_qh *qh, u16 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	struct dwc2_dma_desc *dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	struct dwc2_hcd_iso_packet_desc *frame_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	u16 remain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (!qtd->urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 				sizeof(struct dwc2_dma_desc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 				sizeof(struct dwc2_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 				DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	dma_desc = &qh->desc_list[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
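	/*
	 * For IN transfers the controller writes back how many bytes were
	 * not transferred, so the actual length below is the programmed
	 * size minus this remainder.
	 */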
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (chan->ep_is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			 HOST_DMA_ISOC_NBYTES_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		 * XactError, or unable to complete all the transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		 * in the scheduled micro-frame/frame, both indicated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		 * HOST_DMA_STS_PKTERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		qtd->urb->error_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		frame_desc->actual_length = qh->n_bytes[idx] - remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		frame_desc->status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		/* Success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		frame_desc->actual_length = qh->n_bytes[idx] - remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		frame_desc->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		 * urb->status is not used for isoc transfers here. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		 * individual frame_desc statuses are used instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		dwc2_host_complete(hsotg, qtd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 * This check is necessary because urb_dequeue can be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		 * from the URB completion callback (by a sound driver, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		 * example). All pending URBs are dequeued there, so no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		 * further processing is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		rc = DWC2_CMPL_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	qh->ntd--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	/* Stop once a descriptor with the IOC bit set is reached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (dma_desc->status & HOST_DMA_IOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		rc = DWC2_CMPL_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 					 struct dwc2_host_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 					 enum dwc2_halt_status halt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	struct dwc2_hcd_iso_packet_desc *frame_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	struct dwc2_qtd *qtd, *qtd_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	struct dwc2_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	qh = chan->qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	idx = qh->td_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			qtd->in_process = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		 * The channel is halted in these error cases, which are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		 * considered serious issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		 * Complete all URBs, marking all frames as failed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		 * irrespective of whether some descriptors (frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		 * succeeded or not. Pass the error code to the completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		 * routine as well, to update urb->status; some class
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		 * drivers might use it to stop queuing transfer requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			  -EIO : -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 					 qtd_list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			if (qtd->urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 				for (idx = 0; idx < qtd->urb->packet_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 				     idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 					frame_desc = &qtd->urb->iso_descs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 					frame_desc->status = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 				dwc2_host_complete(hsotg, qtd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		if (!qtd->in_process)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		 * Ensure idx corresponds to the descriptor where the first URB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		 * of this qtd was added. During isoc descriptor init, dwc2 may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		 * skip an index if the current frame number is already past it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (idx != qtd->isoc_td_first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			dev_vdbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				 "try to complete %d instead of %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				 idx, qtd->isoc_td_first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			idx = qtd->isoc_td_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
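		/*
		 * Walk this qtd's descriptors starting at idx. A negative rc
		 * means the URB was dequeued, so abandon the scan entirely;
		 * DWC2_CMPL_DONE means this qtd is fully complete, move on
		 * to the next one; on DWC2_CMPL_STOP (IOC descriptor) keep
		 * scanning only if the frame counter has already passed the
		 * next qtd's last descriptor.
		 */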
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			struct dwc2_qtd *qtd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			u16 cur_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 							  idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 						    chan->speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			if (rc == DWC2_CMPL_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			/* rc == DWC2_CMPL_STOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			if (qh->host_interval >= 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				goto stop_scan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			qh->td_first = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			qtd_next = list_first_entry(&qh->qtd_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 						    struct dwc2_qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 						    qtd_list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			if (dwc2_frame_idx_num_gt(cur_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 						  qtd_next->isoc_td_last))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			goto stop_scan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		} while (idx != qh->td_first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) stop_scan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	qh->td_first = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
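/*
 * Updates the URB state for one completed non-isoc descriptor. Returns 1 on
 * a failed descriptor (urb->status then holds the error code), 0 otherwise;
 * *xfer_done is set once the URB's transfer has finished.
 */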
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 					       struct dwc2_host_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 					struct dwc2_qtd *qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 					struct dwc2_dma_desc *dma_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 					enum dwc2_halt_status halt_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 					u32 n_bytes, int *xfer_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct dwc2_hcd_urb *urb = qtd->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	u16 remain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (chan->ep_is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			 HOST_DMA_NBYTES_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		dev_err(hsotg->dev, "EIO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		urb->status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		switch (halt_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		case DWC2_HC_XFER_STALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			dev_vdbg(hsotg->dev, "Stall\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			urb->status = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		case DWC2_HC_XFER_BABBLE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			dev_err(hsotg->dev, "Babble\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			urb->status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		case DWC2_HC_XFER_XACT_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			dev_err(hsotg->dev, "XactErr\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			urb->status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			dev_err(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				"%s: Unhandled descriptor error status (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 				__func__, halt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	if (dma_desc->status & HOST_DMA_A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		dev_vdbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			 "Active descriptor encountered on channel %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			 chan->hc_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		if (qtd->control_phase == DWC2_CONTROL_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			urb->actual_length += n_bytes - remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			if (remain || urb->actual_length >= urb->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 				 * For Control Data stage do not set urb->status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				 * to 0, to prevent URB callback. Set it when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				 * Status phase is done. See below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 				*xfer_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			urb->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			*xfer_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		/* No handling for SETUP stage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		/* BULK and INTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		urb->actual_length += n_bytes - remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			 urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		if (remain || urb->actual_length >= urb->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			urb->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			*xfer_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 				      struct dwc2_host_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 				      int chnum, struct dwc2_qtd *qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				      int desc_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 				      enum dwc2_halt_status halt_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 				      int *xfer_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	struct dwc2_qh *qh = chan->qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	struct dwc2_hcd_urb *urb = qtd->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	struct dwc2_dma_desc *dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	u32 n_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	int failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (!urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	dma_sync_single_for_cpu(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				qh->desc_list_dma + (desc_num *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 				sizeof(struct dwc2_dma_desc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 				sizeof(struct dwc2_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	dma_desc = &qh->desc_list[desc_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	n_bytes = qh->n_bytes[desc_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	dev_vdbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		 qtd, urb, desc_num, dma_desc, n_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 						     halt_status, n_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 						     xfer_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		dwc2_host_complete(hsotg, qtd, urb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			 failed, *xfer_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		return failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		switch (qtd->control_phase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		case DWC2_CONTROL_SETUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			if (urb->length > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 				qtd->control_phase = DWC2_CONTROL_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 				qtd->control_phase = DWC2_CONTROL_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			dev_vdbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 				 "  Control setup transaction done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		case DWC2_CONTROL_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			if (*xfer_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 				qtd->control_phase = DWC2_CONTROL_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 				dev_vdbg(hsotg->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 					 "  Control data transfer done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			} else if (desc_num + 1 == qtd->n_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				 * Last descriptor for Control data stage which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 				 * is not completed yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 							  qtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 					     struct dwc2_host_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 					     int chnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 					     enum dwc2_halt_status halt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	struct list_head *qtd_item, *qtd_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	struct dwc2_qh *qh = chan->qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	struct dwc2_qtd *qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	int xfer_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	int desc_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			qtd->in_process = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		int qtd_desc_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		xfer_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		qtd_desc_count = qtd->n_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		for (i = 0; i < qtd_desc_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 						       desc_num, halt_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 						       &xfer_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 				qtd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 				goto stop_scan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			desc_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) stop_scan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		 * Resetting the data toggle for bulk and interrupt endpoints
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		 * in case of stall. See handle_hc_stall_intr().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		if (halt_status == DWC2_HC_XFER_STALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			qh->data_toggle = DWC2_HC_PID_DATA0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		if (chan->hcint & HCINTMSK_NYET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			 * Got a NYET on the last transaction of the transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			 * It means that the endpoint should be in the PING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			 * state at the beginning of the next transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			qh->ping_state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  * status and calls completion routine for the URB if it's done. Called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * interrupt handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * @hsotg:       The HCD state structure for the DWC OTG controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  * @chan:        Host channel the transfer is completed on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  * @chnum:       Index of Host channel registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  * @halt_status: Reason the channel is being halted or just XferComplete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  *               for isochronous transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  * Releases the channel to be used by other transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)  * In the case of an Isochronous endpoint the channel is not halted until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)  * end of the session, i.e. until the QTD list is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)  * If a periodic channel is released, the FrameList is updated accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  * Calls transaction selection routines to activate pending transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 				 struct dwc2_host_chan *chan, int chnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 				 enum dwc2_halt_status halt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	struct dwc2_qh *qh = chan->qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	int continue_isoc_xfer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	enum dwc2_transaction_type tr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		/* Release the channel if halted or session completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		if (halt_status != DWC2_HC_XFER_COMPLETE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		    list_empty(&qh->qtd_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			struct dwc2_qtd *qtd, *qtd_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			 * Kill all remaining QTDs since the channel has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			 * halted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			list_for_each_entry_safe(qtd, qtd_tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 						 &qh->qtd_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 						 qtd_list_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				dwc2_host_complete(hsotg, qtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 						   -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				dwc2_hcd_qtd_unlink_and_free(hsotg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 							     qtd, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			/* Halt the channel if session completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			if (halt_status == DWC2_HC_XFER_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 				dwc2_hc_halt(hsotg, chan, halt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			dwc2_release_channel_ddma(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			dwc2_hcd_qh_unlink(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			/* Keep in assigned schedule to continue transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			list_move_tail(&qh->qh_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 				       &hsotg->periodic_sched_assigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			 * If the channel was halted during URB giveback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			 * prevent any new scheduling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			if (!chan->halt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 				continue_isoc_xfer = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		 * Todo: Consider the case when the period exceeds the FrameList
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		 * size. The Frame Rollover interrupt should be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		 * Scan descriptor list to complete the URB(s), then release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		 * the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 						 halt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		dwc2_release_channel_ddma(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		dwc2_hcd_qh_unlink(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		if (!list_empty(&qh->qtd_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			 * Add back to inactive non-periodic schedule on normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			 * completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			dwc2_hcd_qh_add(hsotg, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
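	/*
	 * Kick the scheduler: when an isoc transfer is being continued,
	 * service the periodic schedule even if no new transactions were
	 * selected.
	 */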
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	tr_type = dwc2_hcd_select_transactions(hsotg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		if (continue_isoc_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			if (tr_type == DWC2_TRANSACTION_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 				tr_type = DWC2_TRANSACTION_PERIODIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 				tr_type = DWC2_TRANSACTION_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		dwc2_hcd_queue_transactions(hsotg, tr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }