Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2012 by Alan Stern
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) /* This file is part of ehci-hcd.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) /* Set a bit in the USBCMD register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 	ehci->command |= bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 	/* unblock posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 	ehci_readl(ehci, &ehci->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) /* Clear a bit in the USBCMD register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	ehci->command &= ~bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	/* unblock posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	ehci_readl(ehci, &ehci->regs->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) /*-------------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * EHCI timer support...  Now using hrtimers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * Lots of different events are triggered from ehci->hrtimer.  Whenever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * the timer routine runs, it checks each possible event; events that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  * currently enabled and whose expiration time has passed get handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * The set of enabled events is stored as a collection of bitflags in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * ehci->enabled_hrtimer_events, and they are numbered in order of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * increasing delay values (ranging between 1 ms and 100 ms).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * Rather than implementing a sorted list or tree of all pending events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  * we keep track only of the lowest-numbered pending event, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  * ehci->next_hrtimer_event.  Whenever ehci->hrtimer gets restarted, its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  * expiration time is set to the timeout value for this event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  * As a result, events might not get handled right away; the actual delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  * could be anywhere up to twice the requested delay.  This doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  * matter, because none of the events are especially time-critical.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50)  * ones that matter most all have a delay of 1 ms, so they will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  * handled after 2 ms at most, which is okay.  In addition to this, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  * allow for an expiration range of 1 ms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
/*
 * Delay lengths for the hrtimer event types.
 * Keep this list sorted by delay length, in the same order as
 * the event types indexed by enum ehci_hrtimer_event in ehci.h.
 *
 * NOTE: the ordering is load-bearing twice over — the index must match
 * the enum (it is used to look up delays in ehci_enable_event), and the
 * ascending-delay order is what lets next_hrtimer_event track only the
 * lowest-numbered (i.e. soonest) pending event.
 */
static unsigned event_delays_ns[] = {
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_ASS */
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_PSS */
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_DEAD */
	1125 * NSEC_PER_USEC,	/* EHCI_HRTIMER_UNLINK_INTR */
	2 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_FREE_ITDS */
	2 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_ACTIVE_UNLINK */
	5 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_START_UNLINK_INTR */
	6 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_ASYNC_UNLINKS */
	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IAA_WATCHDOG */
	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_PERIODIC */
	15 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_ASYNC */
	100 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IO_WATCHDOG */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
/*
 * Enable a pending hrtimer event.
 *
 * @event:   index into event_delays_ns / ehci->hr_timeouts
 * @resched: true to compute a fresh deadline from "now"; false to keep
 *           the previously stored deadline (used when the timer routine
 *           re-enables an event whose time has not yet expired)
 *
 * Caller must hold ehci->lock (all callers in this file run under it —
 * TODO confirm for callers outside this chunk).
 */
static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
		bool resched)
{
	ktime_t		*timeout = &ehci->hr_timeouts[event];

	if (resched)
		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
	ehci->enabled_hrtimer_events |= (1 << event);

	/* Track only the lowest-numbered pending event */
	if (event < ehci->next_hrtimer_event) {
		ehci->next_hrtimer_event = event;
		/* 1 ms slack range, matching the file-header comment */
		hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
				NSEC_PER_MSEC, HRTIMER_MODE_ABS);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) /* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) static void ehci_poll_ASS(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	unsigned	actual, want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	/* Don't enable anything if the controller isn't running (e.g., died) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	if (ehci->rh_state != EHCI_RH_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	if (want != actual) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		/* Poll again later, but give up after about 2-4 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		if (ehci->ASS_poll_count++ < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 				want, actual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	ehci->ASS_poll_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	/* The status is up-to-date; restart or stop the schedule as needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	if (want == 0) {	/* Stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		if (ehci->async_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 			ehci_set_command_bit(ehci, CMD_ASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	} else {		/* Running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		if (ehci->async_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 			/* Turn off the schedule after a while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 					true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
/* Turn off the async schedule after a brief delay */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
	/* Handler for the EHCI_HRTIMER_DISABLE_ASYNC event, armed by
	 * ehci_poll_ASS when async_count has dropped to 0 */
	ehci_clear_command_bit(ehci, CMD_ASE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static void ehci_poll_PSS(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	unsigned	actual, want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	/* Don't do anything if the controller isn't running (e.g., died) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	if (ehci->rh_state != EHCI_RH_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	if (want != actual) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		/* Poll again later, but give up after about 2-4 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 		if (ehci->PSS_poll_count++ < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 		ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 				want, actual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	ehci->PSS_poll_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	/* The status is up-to-date; restart or stop the schedule as needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	if (want == 0) {	/* Stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		if (ehci->periodic_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 			ehci_set_command_bit(ehci, CMD_PSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	} else {		/* Running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		if (ehci->periodic_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 			/* Turn off the schedule after a while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 					true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
/* Turn off the periodic schedule after a brief delay */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
	/* Handler for the EHCI_HRTIMER_DISABLE_PERIODIC event, armed by
	 * ehci_poll_PSS when periodic_count has dropped to 0 */
	ehci_clear_command_bit(ehci, CMD_PSE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /* Poll the STS_HALT status bit; see when a dead controller stops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static void ehci_handle_controller_death(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		/* Give up after a few milliseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		if (ehci->died_poll_count++ < 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 			/* Try again later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	/* Clean up the mess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	ehci->rh_state = EHCI_RH_HALTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	ehci_work(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	end_unlink_async(ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	/* Not in process context, so don't try to reset the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) /* start to unlink interrupt QHs  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	bool		stopped = (ehci->rh_state < EHCI_RH_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	 * Process all the QHs on the intr_unlink list that were added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	 * before the current unlink cycle began.  The list is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	 * temporal order, so stop when we reach the first entry in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	 * current cycle.  But if the root hub isn't running then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	 * process all the QHs on the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	while (!list_empty(&ehci->intr_unlink_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		struct ehci_qh	*qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 		qh = list_first_entry(&ehci->intr_unlink_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 				struct ehci_qh, unlink_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 		if (!stopped && (qh->unlink_cycle ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 				ehci->intr_unlink_wait_cycle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		list_del_init(&qh->unlink_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		start_unlink_intr(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	/* Handle remaining entries later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	if (!list_empty(&ehci->intr_unlink_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		++ehci->intr_unlink_wait_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) /* Handle unlinked interrupt QHs once they are gone from the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	bool		stopped = (ehci->rh_state < EHCI_RH_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	 * Process all the QHs on the intr_unlink list that were added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	 * before the current unlink cycle began.  The list is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	 * temporal order, so stop when we reach the first entry in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	 * current cycle.  But if the root hub isn't running then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	 * process all the QHs on the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	ehci->intr_unlinking = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	while (!list_empty(&ehci->intr_unlink)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		struct ehci_qh	*qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 				unlink_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		list_del_init(&qh->unlink_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 		end_unlink_intr(ehci, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	/* Handle remaining entries later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	if (!list_empty(&ehci->intr_unlink)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		++ehci->intr_unlink_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	ehci->intr_unlinking = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /* Start another free-iTDs/siTDs cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static void start_free_itds(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 		ehci->last_itd_to_free = list_entry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 				ehci->cached_itd_list.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 				struct ehci_itd, itd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 		ehci->last_sitd_to_free = list_entry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 				ehci->cached_sitd_list.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 				struct ehci_sitd, sitd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
/*
 * Wait for controller to stop using old iTDs and siTDs.
 *
 * Frees cached iTDs/siTDs up to and including the sentinel entries
 * recorded by start_free_itds(); anything cached after the sentinel
 * stays on the list for the next cycle.
 */
static void end_free_itds(struct ehci_hcd *ehci)
{
	struct ehci_itd		*itd, *n;
	struct ehci_sitd	*sitd, *sn;

	/* If the controller is no longer running, there's no need to
	 * wait another cycle — clear the sentinels so the loops below
	 * drain the lists completely */
	if (ehci->rh_state < EHCI_RH_RUNNING) {
		ehci->last_itd_to_free = NULL;
		ehci->last_sitd_to_free = NULL;
	}

	list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
		list_del(&itd->itd_list);
		dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
		/* Stop at the sentinel; later entries wait a full cycle */
		if (itd == ehci->last_itd_to_free)
			break;
	}
	list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
		list_del(&sitd->sitd_list);
		dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
		/* Stop at the sentinel; later entries wait a full cycle */
		if (sitd == ehci->last_sitd_to_free)
			break;
	}

	/* Entries remain past the sentinels: arm another free cycle */
	if (!list_empty(&ehci->cached_itd_list) ||
			!list_empty(&ehci->cached_sitd_list))
		start_free_itds(ehci);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
/* Handle lost (or very late) IAA interrupts */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
	u32 cmd, status;

	/*
	 * Lost IAA irqs wedge things badly; seen first with a vt8235.
	 * So we need this watchdog, but must protect it against both
	 * (a) SMP races against real IAA firing and retriggering, and
	 * (b) clean HC shutdown, when IAA watchdog was pending.
	 */
	if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
		return;

	/* If we get here, IAA is *REALLY* late.  It's barely
	 * conceivable that the system is so busy that CMD_IAAD
	 * is still legitimately set, so let's be sure it's
	 * clear before we read STS_IAA.  (The HC should clear
	 * CMD_IAAD when it sets STS_IAA.)
	 */
	/* NOTE: cmd is read BEFORE status — that ordering is part of
	 * the erratum workaround described above; don't reorder */
	cmd = ehci_readl(ehci, &ehci->regs->command);

	/*
	 * If IAA is set here it either legitimately triggered
	 * after the watchdog timer expired (_way_ late, so we'll
	 * still count it as lost) ... or a silicon erratum:
	 * - VIA seems to set IAA without triggering the IRQ;
	 * - IAAD potentially cleared without setting IAA.
	 */
	status = ehci_readl(ehci, &ehci->regs->status);
	if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
		INCR(ehci->stats.lost_iaa);
		/* Acknowledge the IAA status bit (write-1-to-clear) */
		ehci_writel(ehci, STS_IAA, &ehci->regs->status);
	}

	ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
	/* Treat the doorbell cycle as finished either way */
	end_iaa_cycle(ehci);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /* Enable the I/O watchdog, if appropriate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static void turn_on_io_watchdog(struct ehci_hcd *ehci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	/* Not needed if the controller isn't running or it's already enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	if (ehci->rh_state != EHCI_RH_RUNNING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 			(ehci->enabled_hrtimer_events &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 				BIT(EHCI_HRTIMER_IO_WATCHDOG)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	 * Isochronous transfers always need the watchdog.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	 * For other sorts we use it only if the flag is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 			ehci->async_count + ehci->intr_count > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 		ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
/*
 * Handler functions for the hrtimer event types.
 * Keep this array in the same order as the event types indexed by
 * enum ehci_hrtimer_event in ehci.h.
 *
 * Indexed directly by event number in ehci_hrtimer_func(), so any
 * reordering here must be mirrored in the enum and event_delays_ns.
 */
static void (*event_handlers[])(struct ehci_hcd *) = {
	ehci_poll_ASS,			/* EHCI_HRTIMER_POLL_ASS */
	ehci_poll_PSS,			/* EHCI_HRTIMER_POLL_PSS */
	ehci_handle_controller_death,	/* EHCI_HRTIMER_POLL_DEAD */
	ehci_handle_intr_unlinks,	/* EHCI_HRTIMER_UNLINK_INTR */
	end_free_itds,			/* EHCI_HRTIMER_FREE_ITDS */
	end_unlink_async,		/* EHCI_HRTIMER_ACTIVE_UNLINK */
	ehci_handle_start_intr_unlinks,	/* EHCI_HRTIMER_START_UNLINK_INTR */
	unlink_empty_async,		/* EHCI_HRTIMER_ASYNC_UNLINKS */
	ehci_iaa_watchdog,		/* EHCI_HRTIMER_IAA_WATCHDOG */
	ehci_disable_PSE,		/* EHCI_HRTIMER_DISABLE_PERIODIC */
	ehci_disable_ASE,		/* EHCI_HRTIMER_DISABLE_ASYNC */
	ehci_work,			/* EHCI_HRTIMER_IO_WATCHDOG */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	struct ehci_hcd	*ehci = container_of(t, struct ehci_hcd, hrtimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	ktime_t		now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	unsigned long	events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	unsigned long	flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	unsigned	e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	spin_lock_irqsave(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	events = ehci->enabled_hrtimer_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	ehci->enabled_hrtimer_events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	 * Check each pending event.  If its time has expired, handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	 * the event; otherwise re-enable it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	now = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 		if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 			event_handlers[e](ehci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 			ehci_enable_event(ehci, e, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	spin_unlock_irqrestore(&ehci->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }