// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001 by David Brownell
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * There are basically three types of memory:
 *	- data used only by the HCD ... kmalloc is fine
 *	- async and periodic schedules, shared by HC and HCD ... these
 *	  need to use dma_pool or dma_alloc_coherent
 *	- driver buffers, read/written by HC ... single-shot DMA mapped
 *
 * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
 * No memory seen by this driver is pageable.
 */
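
/*
 * Illustrative sketch only, not part of this driver: the three styles
 * above map roughly onto these DMA-API calls ("dev", "pool", "buf" and
 * "len" are stand-ins, error handling elided):
 *
 *	priv = kmalloc(sizeof(*priv), GFP_KERNEL);	// HCD-only data
 *	qtd = dma_pool_alloc(pool, GFP_ATOMIC, &dma);	// shared schedule entry
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE); // one-shot buffer
 */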

/*-------------------------------------------------------------------------*/

/* Allocate the key transfer structures from the previously allocated pool */

static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
				  dma_addr_t dma)
{
	memset (qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	/* start out halted (inactive), so the HC won't execute this qtd
	 * until a caller fills it in and activates it
	 */
	qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END(ehci);
	qtd->hw_alt_next = EHCI_LIST_END(ehci);
	INIT_LIST_HEAD (&qtd->qtd_list);
}

static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qtd	*qtd;
	dma_addr_t	dma;

	qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
	if (qtd != NULL) {
		ehci_qtd_init(ehci, qtd, dma);
	}
	return qtd;
}

static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
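
/*
 * Minimal usage sketch (assumed caller context, not code from this
 * file): a qtd comes out of the pool pre-initialized and halted; the
 * caller fills in the hardware fields before activating it, and frees
 * it once the HC is done with it:
 *
 *	struct ehci_qtd *qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
 *
 *	if (qtd) {
 *		// ... fill qtd->hw_buf[] and qtd->hw_token ...
 *		ehci_qtd_free(ehci, qtd);
 *	}
 */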


static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
	kfree(qh);
}

static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qh	*qh;
	dma_addr_t	dma;

	qh = kzalloc(sizeof *qh, GFP_ATOMIC);
	if (!qh)
		goto done;
	qh->hw = (struct ehci_qh_hw *)
		dma_pool_alloc(ehci->qh_pool, flags, &dma);
	if (!qh->hw)
		goto fail;
	memset(qh->hw, 0, sizeof *qh->hw);
	qh->qh_dma = dma;
	// INIT_LIST_HEAD (&qh->qh_list);
	INIT_LIST_HEAD (&qh->qtd_list);
	INIT_LIST_HEAD(&qh->unlink_node);

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc (ehci, flags);
	if (qh->dummy == NULL) {
		ehci_dbg (ehci, "no dummy td\n");
		goto fail1;
	}
done:
	return qh;
fail1:
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
	kfree(qh);
	return NULL;
}
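
/*
 * Background note on the dummy qtd (describes existing behavior, no new
 * code): each QH's qtd list always ends in an inactive dummy, so URBs
 * can be queued by copying the first new qtd's contents into the old
 * dummy and hanging a fresh dummy off the end; the HC never observes a
 * half-linked list.  The actual splice lives in qh_append_tds() in
 * ehci-q.c.
 */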

/*-------------------------------------------------------------------------*/

/* The queue heads and transfer descriptors are managed from pools tied
 * to each of the "per device" structures.
 * This is the initialisation and cleanup code.
 */

static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
	if (ehci->async)
		qh_destroy(ehci, ehci->async);
	ehci->async = NULL;

	if (ehci->dummy)
		qh_destroy(ehci, ehci->dummy);
	ehci->dummy = NULL;

	/* DMA consistent memory and pools */
	dma_pool_destroy(ehci->qtd_pool);
	ehci->qtd_pool = NULL;
	dma_pool_destroy(ehci->qh_pool);
	ehci->qh_pool = NULL;
	dma_pool_destroy(ehci->itd_pool);
	ehci->itd_pool = NULL;
	dma_pool_destroy(ehci->sitd_pool);
	ehci->sitd_pool = NULL;

	if (ehci->periodic)
		dma_free_coherent(ehci_to_hcd(ehci)->self.sysdev,
				ehci->periodic_size * sizeof (u32),
				ehci->periodic, ehci->periodic_dma);
	ehci->periodic = NULL;

	/* shadow periodic table */
	kfree(ehci->pshadow);
	ehci->pshadow = NULL;
}
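
/*
 * Note on error handling: ehci_mem_cleanup() is safe to call on a
 * partially initialized ehci_hcd, since dma_pool_destroy(NULL) and
 * kfree(NULL) are no-ops and the qh/periodic pointers are checked
 * before use.  That is what lets ehci_mem_init() below bail out with a
 * single "goto fail" at any step.
 */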

/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
	int i;

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create ("ehci_qtd",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof (struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create ("ehci_qh",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof(struct ehci_qh_hw),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc (ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create ("ehci_itd",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof (struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create ("ehci_sitd",
			ehci_to_hcd(ehci)->self.sysdev,
			sizeof (struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent(ehci_to_hcd(ehci)->self.sysdev,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, flags);
	if (ehci->periodic == NULL) {
		goto fail;
	}

	/* some controllers mishandle empty (T-bit terminated) frame-list
	 * entries, so every slot points at an inactive dummy QH instead
	 */
	if (ehci->use_dummy_qh) {
		struct ehci_qh_hw *hw;
		ehci->dummy = ehci_qh_alloc(ehci, flags);
		if (!ehci->dummy)
			goto fail;

		hw = ehci->dummy->hw;
		hw->hw_next = EHCI_LIST_END(ehci);
		hw->hw_qtd_next = EHCI_LIST_END(ehci);
		hw->hw_alt_next = EHCI_LIST_END(ehci);
		ehci->dummy->hw = hw;

		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = cpu_to_hc32(ehci,
					ehci->dummy->qh_dma);
	} else {
		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = EHCI_LIST_END(ehci);
	}

	/* software shadow of hardware table */
	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
	if (ehci->pshadow != NULL)
		return 0;

fail:
	ehci_dbg (ehci, "couldn't init memory\n");
	ehci_mem_cleanup (ehci);
	return -ENOMEM;
}
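
/*
 * Typical call site (a sketch from memory of ehci-hcd.c, not part of
 * this file): ehci_init() runs this once per controller during setup,
 * along the lines of:
 *
 *	retval = ehci_mem_init(ehci, GFP_KERNEL);
 *	if (retval < 0)
 *		return retval;
 */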