Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thunderbolt driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the Thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

#define RING_FIRST_USABLE_HOPID	1

/*
 * Minimum number of vectors when MSI-X is used. Two are for the control
 * channel Rx/Tx and the remaining four are for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

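/*
 * Each ring owns one bit in the NHI interrupt mask/status registers: TX
 * rings use bits 0 .. hop_count - 1 and RX rings use the following
 * hop_count bits. For example, with hop_count == 12, TX ring 3 would map
 * to bit 3 and RX ring 3 to bit 15 (illustrative numbers, not from this
 * file).
 */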
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 * @ring: Ring whose interrupt to enable or disable
 * @active: %true to enable the ring interrupt, %false to disable it
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 * @nhi: NHI to disable the ring interrupts for
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits in the register are read-only and writes to them
	 * are ignored by the hardware, so we can save one ioread32() by
	 * filling the read-only bits with zeroes.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}

static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

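/*
 * The 64-bit descriptor base address register is written as two
 * consecutive 32-bit dwords, low dword first, since the MMIO helpers
 * used here operate on 32-bit registers.
 */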
static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

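/*
 * head is the next slot the driver fills, tail is the next slot the
 * hardware completes. One slot is kept unused so that a full ring can be
 * told apart from an empty one: e.g. with size == 16, head == 5 and
 * tail == 6 the ring is full, while head == tail means it is empty.
 */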
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
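
/*
 * A minimal sketch of how a client would queue a receive buffer. The
 * tb_ring_rx() wrapper around __tb_ring_enqueue() lives in the public
 * thunderbolt header; the buffer and callback names below are
 * illustrative, not from this file:
 *
 *	static void rx_callback(struct tb_ring *ring,
 *				struct ring_frame *frame, bool canceled)
 *	{
 *		// for RX completions, frame->size/eof/sof are filled in
 *	}
 *
 *	frame->buffer_phy = dma_map_single(&nhi->pdev->dev, buf,
 *					   TB_FRAME_SIZE, DMA_FROM_DEVICE);
 *	frame->callback = rx_callback;
 *	ret = tb_ring_rx(ring, frame);	// -ESHUTDOWN if the ring is stopped
 */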

/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when @start_poll callback of the @ring
 * has been called. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more completed
 * frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);

static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}

/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}

/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
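
/*
 * Sketch of the polling-mode flow, assuming the ring was allocated with a
 * start_poll callback (this only summarizes the functions above, it adds
 * no new API):
 *
 *	1. The MSI-X handler masks the ring interrupt and calls
 *	   ring->start_poll(ring->poll_data).
 *	2. start_poll schedules the client's own context, which drains
 *	   completions:
 *
 *		while ((frame = tb_ring_poll(ring)) != NULL)
 *			process(frame);		// client-defined
 *
 *	3. When done, the client calls tb_ring_poll_complete(ring) to
 *	   unmask the interrupt again.
 */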

static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}

static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ret < 0)
		goto err_ida_remove;

	ring->irq = ret;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
	if (ret)
		goto err_ida_remove;

	return 0;

err_ida_remove:
	ida_simple_remove(&nhi->msix_ida, ring->vector);

	return ret;
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	int ret = 0;

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * Automatically allocate HopID from the non-reserved
		 * range 1 .. hop_count - 1.
		 */
		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
					break;
				}
			} else {
				if (!nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}
	}

	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);

	return ret;
}

static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		transmit ? "TX" : "RX", hop, size);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}

/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);

/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking
 *		the callback for each received frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
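
/*
 * A minimal allocation sketch (the sizes and PDF masks are illustrative;
 * error handling beyond the NULL check is omitted):
 *
 *	struct tb_ring *ring;
 *
 *	// 256-entry RX ring, auto-allocated HopID, SOF/EOF on PDF bit 1
 *	ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME,
 *				BIT(1), BIT(1), NULL, NULL);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 *	...
 *	tb_ring_stop(ring);
 *	tb_ring_free(ring);
 */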

/**
 * tb_ring_start() - enable a ring
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
		RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* A frame_size of 0 means 4096 bytes */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);

/**
 * tb_ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring has been
 * started again with tb_ring_start().
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
		RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);

/*
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);

/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}
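
/*
 * Illustrative call: the command values come from enum nhi_mailbox_cmd in
 * nhi.h (NHI_MAILBOX_DRV_UNLOADS is believed to be one such command, shown
 * here only as an example):
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (ret)
 *		dev_warn(&nhi->pdev->dev, "mailbox command failed: %d\n", ret);
 */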

/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads the current firmware operation mode using NHI
 * mailbox registers and returns it to the caller.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	return (enum nhi_fw_mode)val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) }
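
/*
 * Usage sketch (editor's illustration): the firmware connection manager
 * compares the returned mode against enum nhi_fw_mode (nhi.h), e.g. to
 * detect a controller stuck in safe mode:
 *
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_SAFE_MODE)
 *		dev_info(&nhi->pdev->dev, "firmware is in safe mode\n");
 */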
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) static void nhi_interrupt_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	int value = 0; /* Suppress uninitialized usage warning. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	int hop = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	struct tb_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	spin_lock_irq(&nhi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	 */
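	/*
	 * Worked example (editor's note, values hypothetical): bit b maps
	 * to type b / hop_count and ring b % hop_count. With a hop_count
	 * of 12, bit 25 is an RX overflow (type 2) event for ring 1, and
	 * bit 34 is fetched from the second status dword since 34 / 32 == 1.
	 */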
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		if (bit % 32 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			value = ioread32(nhi->iobase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 					 + REG_RING_NOTIFY_BASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 					 + 4 * (bit / 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		if (++hop == nhi->hop_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			hop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			type++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if ((value & (1 << (bit % 32))) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		if (type == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			dev_warn(&nhi->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 				 "RX overflow for ring %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 				 hop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		if (type == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			ring = nhi->tx_rings[hop];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			ring = nhi->rx_rings[hop];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		if (ring == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			dev_warn(&nhi->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 				 "got interrupt for inactive %s ring %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				 type ? "RX" : "TX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				 hop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		spin_lock(&ring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		__ring_interrupt(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		spin_unlock(&ring->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	spin_unlock_irq(&nhi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) static irqreturn_t nhi_msi(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct tb_nhi *nhi = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	schedule_work(&nhi->interrupt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct tb_nhi *nhi = tb->nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	ret = tb_domain_suspend_noirq(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (nhi->ops && nhi->ops->suspend_noirq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) static int nhi_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static int nhi_freeze_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return tb_domain_freeze_noirq(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) static int nhi_thaw_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	return tb_domain_thaw_noirq(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static bool nhi_wake_supported(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	/*
	 * If the power rails are sustainable for wakeup from S4, this
	 * property is set by the BIOS.
	 */
	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
		return !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static int nhi_poweroff_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	bool wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return __nhi_suspend_noirq(dev, wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static void nhi_enable_int_throttling(struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) {
	/*
	 * Throttling is specified in 256ns increments: the 128 us value
	 * below works out to DIV_ROUND_UP(128000, 256) = 500 units.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	for (i = 0; i < MSIX_MAX_VECS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		iowrite32(throttle, nhi->iobase + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) static int nhi_resume_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	struct tb_nhi *nhi = tb->nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device, which causes the host controller to
	 * go away on PCs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (!pci_device_is_present(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		nhi->going_away = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		if (nhi->ops && nhi->ops->resume_noirq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			ret = nhi->ops->resume_noirq(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		nhi_enable_int_throttling(tb->nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	return tb_domain_resume_noirq(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) static int nhi_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	return tb_domain_suspend(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static void nhi_complete(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 * If we were runtime suspended when system suspend started,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	 * schedule runtime resume now. It should bring the domain back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	 * to functional state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (pm_runtime_suspended(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		pm_runtime_resume(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		tb_domain_complete(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) static int nhi_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct tb_nhi *nhi = tb->nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	ret = tb_domain_runtime_suspend(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (nhi->ops && nhi->ops->runtime_suspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		ret = nhi->ops->runtime_suspend(tb->nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static int nhi_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	struct tb_nhi *nhi = tb->nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (nhi->ops && nhi->ops->runtime_resume) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		ret = nhi->ops->runtime_resume(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	nhi_enable_int_throttling(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	return tb_domain_runtime_resume(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static void nhi_shutdown(struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	dev_dbg(&nhi->pdev->dev, "shutdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	for (i = 0; i < nhi->hop_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		if (nhi->tx_rings[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			dev_WARN(&nhi->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 				 "TX ring %d is still active\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		if (nhi->rx_rings[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			dev_WARN(&nhi->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 				 "RX ring %d is still active\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	nhi_disable_interrupts(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 * We have to release the irq before calling flush_work. Otherwise an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	 * already executing IRQ handler could call schedule_work again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if (!nhi->pdev->msix_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		flush_work(&nhi->interrupt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	ida_destroy(&nhi->msix_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (nhi->ops && nhi->ops->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		nhi->ops->shutdown(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static int nhi_init_msi(struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	struct pci_dev *pdev = nhi->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	int res, irq, nvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	/* In case someone left them on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	nhi_disable_interrupts(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	nhi_enable_int_throttling(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	ida_init(&nhi->msix_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * its own MSI-X vector. If for some reason that does not work
	 * out, we fall back to a single MSI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				     PCI_IRQ_MSIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (nvec < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if (nvec < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			return nvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		irq = pci_irq_vector(nhi->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			dev_err(&pdev->dev, "request_irq failed, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
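
/*
 * Editor's note: pci_alloc_irq_vectors() also accepts combined flags, so
 * the fallback above could be collapsed into one call:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, MSIX_MAX_VECS,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
 *
 * The driver keeps two calls because the MSI-X path requires at least
 * MSIX_MIN_VECS vectors, while the MSI fallback needs exactly one plus
 * the shared interrupt_work handler.
 */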
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static bool nhi_imr_valid(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		return !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * During suspend the Thunderbolt controller is reset and all PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * tunnels are lost. The NHI driver will try to reestablish all tunnels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * the NHI is resumed before the rest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void tb_apple_add_links(struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	struct pci_dev *upstream, *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	if (!x86_apple_machine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	switch (nhi->pdev->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	upstream = pci_upstream_bridge(nhi->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	while (upstream) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		if (!pci_is_pcie(upstream))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		upstream = pci_upstream_bridge(upstream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (!upstream)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	/*
	 * For each hotplug downstream port, add a device link back to
	 * the NHI so that PCIe tunnels can be re-established after
	 * sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	for_each_pci_bridge(pdev, upstream->subordinate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		const struct device_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		if (!pci_is_pcie(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		    !pdev->is_hotplug_bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 				       DL_FLAG_AUTOREMOVE_SUPPLIER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				       DL_FLAG_PM_RUNTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		if (link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 				dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				 dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
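
/*
 * Editor's note on the device_link_add() call above: the downstream port
 * is the consumer and the NHI is the supplier, so DL_FLAG_PM_RUNTIME
 * makes the PM core resume the NHI before any tunneled port, and
 * DL_FLAG_AUTOREMOVE_SUPPLIER drops the link when the NHI driver is
 * unbound.
 */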
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	struct tb_nhi *nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	struct tb *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (!nhi_imr_valid(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	res = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	if (!nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	nhi->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
	/* cannot fail - table is allocated in pcim_iomap_regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	nhi->iobase = pcim_iomap_table(pdev)[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				     sizeof(*nhi->rx_rings), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	if (!nhi->tx_rings || !nhi->rx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	res = nhi_init_msi(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	spin_lock_init(&nhi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		dev_err(&pdev->dev, "failed to set DMA mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (nhi->ops && nhi->ops->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		res = nhi->ops->init(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 			return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	tb_apple_add_links(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	tb_acpi_add_links(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	tb = icm_probe(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (!tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		tb = tb_probe(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (!tb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		dev_err(&nhi->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			"failed to determine connection manager, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	res = tb_domain_add(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		 * At this point the RX/TX rings might already have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		 * activated. Do a proper shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		tb_domain_put(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		nhi_shutdown(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	pci_set_drvdata(pdev, tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	device_wakeup_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	pm_runtime_allow(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	pm_runtime_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	pm_runtime_put_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static void nhi_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct tb *tb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct tb_nhi *nhi = tb->nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	pm_runtime_dont_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	pm_runtime_forbid(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	tb_domain_remove(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	nhi_shutdown(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
 * The tunneled PCIe bridges are siblings of the NHI. Use resume_noirq to
 * re-enable the tunnels as early as possible. A corresponding PCI quirk
 * blocks the downstream bridges' resume_noirq until we are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static const struct dev_pm_ops nhi_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	.suspend_noirq = nhi_suspend_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	.resume_noirq = nhi_resume_noirq,
	/* We just disable hotplug; the PCIe tunnels stay alive. */
	.freeze_noirq = nhi_freeze_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	.thaw_noirq = nhi_thaw_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	.restore_noirq = nhi_resume_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	.suspend = nhi_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	.poweroff_noirq = nhi_poweroff_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	.poweroff = nhi_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	.complete = nhi_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	.runtime_suspend = nhi_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	.runtime_resume = nhi_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static struct pci_device_id nhi_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	/*
	 * We have to specify class, as the TB bridges use the same device
	 * and vendor (sub)id on gen 1 and gen 2 controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		.vendor = PCI_VENDOR_ID_INTEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		.subvendor = 0x2222, .subdevice = 0x1111,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		.vendor = PCI_VENDOR_ID_INTEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		.subvendor = 0x2222, .subdevice = 0x1111,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		.vendor = PCI_VENDOR_ID_INTEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		.vendor = PCI_VENDOR_ID_INTEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	/* Thunderbolt 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/* Any USB4 compliant host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	{ 0,}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) MODULE_DEVICE_TABLE(pci, nhi_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static struct pci_driver nhi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	.name = "thunderbolt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	.id_table = nhi_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	.probe = nhi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	.remove = nhi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	.shutdown = nhi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	.driver.pm = &nhi_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static int __init nhi_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	ret = tb_domain_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	ret = pci_register_driver(&nhi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		tb_domain_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static void __exit nhi_unload(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	pci_unregister_driver(&nhi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	tb_domain_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) rootfs_initcall(nhi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) module_exit(nhi_unload);