/* SPDX-License-Identifier: GPL-2.0 */

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */
#ifndef _GSI_H_
#define _GSI_H_

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX	17
#define GSI_EVT_RING_COUNT_MAX	13

/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
#define GSI_TLV_MAX		64

struct device;
struct scatterlist;
struct platform_device;

struct gsi;
struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;

/* Execution environment IDs */
enum gsi_ee_id {
	GSI_EE_AP	= 0,
	GSI_EE_MODEM	= 1,
	GSI_EE_UC	= 2,
	GSI_EE_TZ	= 3,
};

struct gsi_ring {
	void *virt;			/* ring array base address */
	dma_addr_t addr;		/* primarily low 32 bits used */
	u32 count;			/* number of elements in ring */

	/* The ring index value indicates the next "open" entry in the ring.
	 *
	 * A channel ring consists of TRE entries filled by the AP and passed
	 * to the hardware for processing. For a channel ring, the ring index
	 * identifies the next unused entry to be filled by the AP.
	 *
	 * An event ring consists of event structures filled by the hardware
	 * and passed to the AP. For event rings, the ring index identifies
	 * the next ring entry that is not known to have been filled by the
	 * hardware.
	 */
	u32 index;
};
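
/* Worked example (illustrative only, assuming the index is free-running and
 * the slot actually used is the index modulo count): with count == 8 and a
 * channel ring index of 10, the next TRE the AP fills occupies slot
 * (10 % 8) == 2, and the entries it filled earlier belong to the hardware
 * until their completions arrive. For an event ring the roles reverse:
 * index 10 names the first slot not yet known to have been written by the
 * hardware.
 */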

/* Transactions use several resources that can be allocated dynamically
 * but taken from a fixed-size pool. The number of elements required for
 * the pool is limited by the total number of TREs that can be outstanding.
 *
 * If sufficient TREs are available to reserve for a transaction,
 * allocation from these pools is guaranteed to succeed. Furthermore,
 * these resources are implicitly freed whenever the TREs in the
 * transaction they're associated with are released.
 *
 * The result of a pool allocation of multiple elements is always
 * contiguous.
 */
struct gsi_trans_pool {
	void *base;			/* base address of element pool */
	u32 count;			/* # elements in the pool */
	u32 free;			/* next free element in pool (modulo) */
	u32 size;			/* size (bytes) of an element */
	u32 max_alloc;			/* max allocation request */
	dma_addr_t addr;		/* DMA address if DMA pool (or 0) */
};
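
/* Illustrative sketch only: gsi_trans_pool_alloc_sketch() is a hypothetical
 * helper, not part of the driver's API, showing one way the fields above
 * could support allocations that are always contiguous and never fail.
 * It assumes the pool was over-sized when it was created, so that starting
 * over at the base never lands on an element that is still in use.
 */
static inline void *gsi_trans_pool_alloc_sketch(struct gsi_trans_pool *pool,
						u32 count)
{
	void *entry;

	/* If @count contiguous elements would not fit before the end of
	 * the pool, start over at the base (the entries skipped this way
	 * are presumed to have been freed long ago).
	 */
	if (count > pool->count - pool->free)
		pool->free = 0;

	entry = pool->base + pool->size * pool->free;
	pool->free += count;

	return entry;
}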

struct gsi_trans_info {
	atomic_t tre_avail;		/* TREs available for allocation */
	struct gsi_trans_pool pool;	/* transaction pool */
	struct gsi_trans_pool sg_pool;	/* scatterlist pool */
	struct gsi_trans_pool cmd_pool;	/* command payload DMA pool */
	struct gsi_trans_pool info_pool;/* command information pool */
	struct gsi_trans **map;		/* TRE -> transaction map */

	spinlock_t spinlock;		/* protects updates to the lists */
	struct list_head alloc;		/* allocated, not committed */
	struct list_head pending;	/* committed, awaiting completion */
	struct list_head complete;	/* completed, awaiting poll */
	struct list_head polled;	/* returned by gsi_channel_poll_one() */
};

/* Hardware values signifying the state of a channel */
enum gsi_channel_state {
	GSI_CHANNEL_STATE_NOT_ALLOCATED	= 0x0,
	GSI_CHANNEL_STATE_ALLOCATED	= 0x1,
	GSI_CHANNEL_STATE_STARTED	= 0x2,
	GSI_CHANNEL_STATE_STOPPED	= 0x3,
	GSI_CHANNEL_STATE_STOP_IN_PROC	= 0x4,
	GSI_CHANNEL_STATE_ERROR		= 0xf,
};
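
/* Typical lifecycle (as the state names suggest; the GSI channel command
 * interface drives these transitions): a channel goes from NOT_ALLOCATED to
 * ALLOCATED, then moves between STARTED and STOPPED as it is started and
 * stopped (STOP_IN_PROC while a stop has been issued but has not yet
 * completed), and ends up in ERROR if something goes wrong.
 */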

/* We only care about channels between IPA and AP */
struct gsi_channel {
	struct gsi *gsi;
	bool toward_ipa;
	bool command;			/* AP command TX channel or not */
	bool use_prefetch;		/* use prefetch (else escape buf) */

	u8 tlv_count;			/* # entries in TLV FIFO */
	u16 tre_count;
	u16 event_count;

	struct completion completion;	/* signals channel command completion */

	struct gsi_ring tre_ring;
	u32 evt_ring_id;

	u64 byte_count;			/* total # bytes transferred */
	u64 trans_count;		/* total # transactions */
	/* The following counts are used only for TX endpoints */
	u64 queued_byte_count;		/* last reported queued byte count */
	u64 queued_trans_count;		/* ...and queued trans count */
	u64 compl_byte_count;		/* last reported completed byte count */
	u64 compl_trans_count;		/* ...and completed trans count */

	struct gsi_trans_info trans_info;

	struct napi_struct napi;
};

/* Hardware values signifying the state of an event ring */
enum gsi_evt_ring_state {
	GSI_EVT_RING_STATE_NOT_ALLOCATED	= 0x0,
	GSI_EVT_RING_STATE_ALLOCATED		= 0x1,
	GSI_EVT_RING_STATE_ERROR		= 0xf,
};

struct gsi_evt_ring {
	struct gsi_channel *channel;
	struct completion completion;	/* signals event ring state changes */
	enum gsi_evt_ring_state state;
	struct gsi_ring ring;
};

struct gsi {
	struct device *dev;		/* Same as IPA device */
	struct net_device dummy_dev;	/* needed for NAPI */
	void __iomem *virt;
	u32 irq;
	u32 channel_count;
	u32 evt_ring_count;
	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
	u32 event_bitmap;
	u32 event_enable_bitmap;
	u32 modem_channel_bitmap;
	struct completion completion;	/* for global EE commands */
	struct mutex mutex;		/* protects commands, programming */
};

/**
 * gsi_setup() - Set up the GSI subsystem
 * @gsi: Address of GSI structure embedded in an IPA structure
 * @legacy: Set up for legacy hardware
 *
 * Return: 0 if successful, or a negative error code
 *
 * Performs initialization that must wait until the GSI hardware is
 * ready (including firmware loaded).
 */
int gsi_setup(struct gsi *gsi, bool legacy);

/**
 * gsi_teardown() - Tear down GSI subsystem
 * @gsi: GSI address previously passed to a successful gsi_setup() call
 */
void gsi_teardown(struct gsi *gsi);

/**
 * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
 * @gsi: GSI pointer
 * @channel_id: Channel whose limit is to be returned
 *
 * Return: The maximum number of TREs outstanding on the channel
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
 * @gsi: GSI pointer
 * @channel_id: Channel whose limit is to be returned
 *
 * Return: The maximum TRE count per transaction on the channel
 */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_start() - Start an allocated GSI channel
 * @gsi: GSI pointer
 * @channel_id: Channel to start
 *
 * Return: 0 if successful, or a negative error code
 */
int gsi_channel_start(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_stop() - Stop a started GSI channel
 * @gsi: GSI pointer previously passed to a successful gsi_setup() call
 * @channel_id: Channel to stop
 *
 * Return: 0 if successful, or a negative error code
 */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);

/**
 * gsi_channel_reset() - Reset an allocated GSI channel
 * @gsi: GSI pointer
 * @channel_id: Channel to be reset
 * @legacy: Legacy behavior
 *
 * Reset a channel and reconfigure it. The @legacy flag indicates
 * that some steps should be done differently for legacy hardware.
 *
 * GSI hardware relinquishes ownership of all pending receive buffer
 * transactions and they will complete with their cancelled flag set.
 */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);

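/* Suspend and resume a started GSI channel (the @stop/@start flags appear
 * to request that the channel actually be stopped or restarted as part of
 * the operation).
 */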
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);

/**
 * gsi_init() - Initialize the GSI subsystem
 * @gsi: Address of GSI structure embedded in an IPA structure
 * @pdev: IPA platform device
 * @prefetch: Whether channels use prefetch (else an escape buffer)
 * @count: Number of entries in the @data array
 * @data: Endpoint and channel configuration data
 * @modem_alloc: Whether the AP allocates channels owned by the modem
 *
 * Return: 0 if successful, or a negative error code
 *
 * Early stage initialization of the GSI subsystem, performing tasks
 * that can be done before the GSI hardware is ready to use.
 */
int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
	     u32 count, const struct ipa_gsi_endpoint_data *data,
	     bool modem_alloc);

/**
 * gsi_exit() - Exit the GSI subsystem
 * @gsi: GSI address previously passed to a successful gsi_init() call
 */
void gsi_exit(struct gsi *gsi);
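
/* Typical call sequence, pieced together from the comments above; this is an
 * illustrative sketch only (error handling is omitted and the arguments shown
 * are placeholders the caller supplies):
 *
 *	err = gsi_init(gsi, pdev, prefetch, count, data, modem_alloc);
 *	...	(wait until the GSI hardware is ready, firmware loaded)
 *	err = gsi_setup(gsi, legacy);
 *	err = gsi_channel_start(gsi, channel_id);
 *	...	(perform transfers on the channel)
 *	err = gsi_channel_stop(gsi, channel_id);
 *	gsi_teardown(gsi);
 *	gsi_exit(gsi);
 */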

#endif /* _GSI_H_ */